# SlapOS source tree at commit d81bc08ef5fa49003cf861d28f4a5ee73ff43581 (selected software releases)
# === software/backupserver/buildout.hash.cfg ===
# THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template-nginx-service]
filename = template-nginx-service.sh.in
md5sum = 458870b70c33a1621b68961ae2372ad5
[template-nginx-configuration]
filename = template-nginx.cfg.in
md5sum = fc4f258e3ae56e0c9596484d736ccfed
[template-dcron-service]
filename = template-dcron-service.sh.in
md5sum = 851262d7174da868805cb7c8e1ced7c0
[template-backup-script]
filename = template-backup-script.sh.in
md5sum = 3f3286347a7e271e7bfa66e1a840989b
[template-crontab-line]
filename = template-crontab-line.in
md5sum = 5cbd64f04da0601ba4286516a6161f5e
[template-crontab]
filename = template-crontab.in
md5sum = f1f82101258de19068262b7213fc478b
[status2rss]
filename = status2rss.py
md5sum = 432d22bb0f67df5203bbc5d1134a952b
[template-update-rss-script]
filename = template-update-rss.sh.in
md5sum = ae4a0043414336a521b524d9c95f1c68
[template-pullrdiffbackup]
filename = instance-pullrdiffbackup.cfg.in
md5sum = 931038cfa23216af1628b960a2e10de6
[template]
filename = instance.cfg.in
md5sum = 974e21f30669e1b83e1e0cd8def0adc3
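The sections above pair each template file with its md5sum, as described by the header grammar. For illustration only, here is a minimal, hypothetical verification sketch — not the re-generation script mentioned in the header — that checks the md5sum entries of such a file:

# check_hashes.py -- hypothetical helper illustrating the buildout.hash.cfg format
try:
    import configparser                  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2
import hashlib
import os
import sys

def check_hashes(hash_cfg_path):
    parser = configparser.ConfigParser()
    parser.read(hash_cfg_path)
    # "filename" paths are relative to the buildout.hash.cfg file itself
    base_dir = os.path.dirname(os.path.abspath(hash_cfg_path))
    ok = True
    for section in parser.sections():
        if not (parser.has_option(section, 'filename')
                and parser.has_option(section, 'md5sum')):
            continue
        filename = parser.get(section, 'filename')
        expected = parser.get(section, 'md5sum')
        with open(os.path.join(base_dir, filename), 'rb') as f:
            actual = hashlib.md5(f.read()).hexdigest()
        if actual != expected:
            print('%s: expected %s, got %s' % (filename, expected, actual))
            ok = False
    return ok

if __name__ == '__main__':
    sys.exit(0 if check_hashes(sys.argv[1]) else 1)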
# === software/backupserver/instance-pullrdiffbackup.cfg.in ===
{% set part_list = [] -%}
{% set crontab_line_list = [] -%}
###############################
#
# Instantiate dcron
#
###############################
[variables]
status_dirbasename = status
statistic_dirbasename = statistic
[directory]
recipe = slapos.cookbook:mkdirectory
etc = $${buildout:directory}/etc
bin = $${buildout:directory}/bin
srv = $${buildout:directory}/srv
var = $${buildout:directory}/var
run = $${:var}/run
log = $${:var}/log
varnginx = $${:var}/nginx
# scripts = $${:etc}/run
services = $${:etc}/service
cron-entries = $${:etc}/cron.d
cron-lines = $${:etc}/cron.lines
crontabs = $${:etc}/crontabs
cronstamps = $${:etc}/cronstamps
backup = $${:srv}/backup
status = $${:srv}/$${variables:status_dirbasename}
statistic = $${:srv}/$${variables:statistic_dirbasename}
backupscript = $${:etc}/backup
www = $${:srv}/www
home = $${:etc}/home
ssl = $${:etc}/ssl
ssh = $${:home}/.ssh
plugin = $${:etc}/plugin
#################################
# Cron service
#################################
[dcron-service]
recipe = slapos.recipe.template
url = ${template-dcron-service:output}
output = $${directory:services}/crond
logfile = $${directory:log}/crond.log
#################################
# Slave backup scripts and crontab
#################################
# Go through the slave list to set each slave's configuration
{% for slave_instance in slave_instance_list -%}
{% set orig_slave_reference = slave_instance.get('slave_reference') -%}
{% set slave_reference = orig_slave_reference.replace(' ', '_') -%}
{% set frequency = slave_instance.get('frequency', '') -%}
{% set hostname = slave_instance.get('hostname', '') -%}
{% set connection = slave_instance.get('connection', '') -%}
{% set connection_port = slave_instance.get('connection_port', '22') -%}
{% set include = slave_instance.get('include', '') -%}
{% set include_string = "' --include='".join(include.split(' ')) -%}
{% set exclude = slave_instance.get('exclude', '') -%}
{% set exclude_string = '' -%}
{% set sudo = slave_instance.get('sudo', 'False') -%}
{% set remote_schema = slave_instance.get('remote_rdiff_path', 'rdiff-backup') + ' --server --restrict-read-only / -- "$@"' -%}
{% if (exclude != '') -%}
{% set exclude_string = "' --exclude='".join(exclude.split(' ')) -%}
{% set exclude_string = "--exclude='" + exclude_string + "'" -%}
{% endif -%}
{% if (sudo == 'True') -%}
{% set remote_schema = 'sudo backupagent_rdiff-backup' -%}
{% endif -%}
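{# Illustration (hypothetical values): with include = "/etc /home", the join above
   yields include_string = "/etc' --include='/home"; the backup script then wraps it
   as --include='/etc' --include='/home'. exclude_string is built the same way but
   is already fully wrapped (--exclude='...'). #}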
{% if (frequency != '') and (hostname != '') and (connection != '') and (include != '') -%}
[{{ slave_reference }}-backup-directory]
recipe = slapos.cookbook:mkdirectory
directory = $${directory:backup}/$${:_buildout_section_name_}
[{{ slave_reference }}-backup-private_key]
recipe = plone.recipe.command
stop-on-error = true
command = ${coreutils-output:rm} -f $${:key} $${:public_key} && ${openssh-output:keygen} -t $${:type} -b 2048 -f $${:key} -q -N ""
key = $${directory:ssh}/$${:_buildout_section_name_}
public_key = $${:key}.pub
location = $${:public_key}
type = rsa
# Insert at the beginning of the part list, to ensure that all public keys are generated before trying to publish. This reduces the number of slapgrid-cp runs.
{% do part_list.insert(0, "%s-backup-private_key" % slave_reference) -%}
[{{ slave_reference }}-backup-read-public_key]
recipe = slapos.cookbook:readline
storage-path = {{ '$${' ~ slave_reference }}-backup-private_key:public_key}
# Publish slave {{ slave_reference }} information
[{{ slave_reference }}-backup-publish]
recipe = slapos.cookbook:publish
-slave-reference = {{ orig_slave_reference }}
authorized_key = {{ '$${' ~ slave_reference }}-backup-read-public_key:readline}
rss = https://[$${nginx-configuration:ip}]:$${nginx-configuration:port}/{{ '$${' ~ slave_reference }}-backup-script:status_name}.rss
{% do part_list.append("%s-backup-publish" % slave_reference) -%}
[{{ slave_reference }}-promise-check-backup]
recipe = slapos.cookbook:promise.plugin
eggs =
  slapos.toolbox
output = $${directory:plugin}/{{ slave_reference }}_check_backup.py
module = slapos.promise.plugin.backupserver_check_backup
config-status_dirbasename = $${variables:status_dirbasename}
config-status_name = {{ '$${' ~ slave_reference }}-backup-script:status_name}
config-status_fullpath = {{ '$${' ~ slave_reference }}-backup-script:status_log}
config-script_fullpath = {{ '$${' ~ slave_reference }}-backup-script:output}
config-cron_frequency = {{ frequency }}
config-monitor_url = $${monitor-publish:monitor-base-url}
config-statistic_dirbasename = $${variables:statistic_dirbasename}
config-statistic_name = {{ '$${' ~ slave_reference }}-backup-script:statistic_name}
{% do part_list.append("%s-promise-check-backup" % slave_reference) -%}
[{{ slave_reference }}-backup-script]
recipe = slapos.recipe.template
url = ${template-backup-script:output}
output = $${directory:backupscript}/$${:_buildout_section_name_}
datadirectory = {{ '$${' ~ slave_reference }}-backup-directory:directory}
sshkey = {{ '$${' ~ slave_reference }}-backup-private_key:key}
connection = {{ connection }}
connection_port = {{ connection_port }}
hostname = {{ hostname }}
include = {{ include_string }}
exclude_string = {{ exclude_string }}
remote_schema = {{ remote_schema }}
status_name = {{ slave_reference }}_status.txt
statistic_name = {{ slave_reference }}_statistic.txt
status_log = $${directory:status}/$${:status_name}
statistic_log = $${directory:statistic}/$${:statistic_name}
[{{ slave_reference }}-backup-crontab-line]
recipe = slapos.recipe.template
url = ${template-crontab-line:output}
output = $${directory:cron-lines}/$${:_buildout_section_name_}
script = {{ '$${' ~ slave_reference }}-backup-script:output}
frequency = {{ frequency }}
{% do crontab_line_list.append("$${%s-backup-crontab-line:output}" % slave_reference) -%}
{% endif -%}
{% endfor -%}
#################################
# Generate crontab file
#################################
[update-rss-script]
recipe = slapos.recipe.template
url = ${template-update-rss-script:output}
output = $${directory:etc}/$${:_buildout_section_name_}
global_rss = $${slap-connection:computer-id}-$${slap-connection:partition-id}.rss
[update-rss-crontab-line]
recipe = slapos.recipe.template
url = ${template-crontab-line:output}
output = $${directory:cron-lines}/$${:_buildout_section_name_}
script = $${update-rss-script:output}
frequency = */5 * * * *
{% do crontab_line_list.append("$${update-rss-crontab-line:output}") -%}
[publish-global-rss]
recipe = slapos.cookbook:publish
<= monitor-publish
rss = https://[$${nginx-configuration:ip}]:$${nginx-configuration:port}/$${update-rss-script:global_rss}
{% set crontab_line_list_string = " ".join(crontab_line_list) -%}
[activate-crontab-file]
# XXX File is never removed
recipe = plone.recipe.command
stop-on-error = true
command = ${coreutils-output:cat} ${template-crontab:output} {{ crontab_line_list_string }} | ${dcron-output:crontab} -c $${directory:crontabs} -
#################################
# Nginx service
#################################
[nginx-service]
recipe = slapos.recipe.template
url = ${template-nginx-service:output}
output = $${directory:services}/nginx
virtual-depends =
  $${nginx-configuration:ip}
[nginx-listen-promise]
<= monitor-promise-base
promise = check_socket_listening
name = nginx_listen.py
config-host = $${nginx-configuration:ip}
config-port = $${nginx-configuration:port}
[nginx-configuration]
recipe = slapos.recipe.template
url = ${template-nginx-configuration:output}
output = $${directory:etc}/nginx.cfg
access_log = $${directory:log}/nginx-access.log
error_log = $${directory:log}/nginx-error.log
ip = {{ partition_ipv6 }}
port = 9443
ssl_key = $${directory:ssl}/nginx.key
ssl_csr = $${directory:ssl}/nginx.csr
ssl_crt = $${directory:ssl}/nginx.crt
#################################
# Monitoring
#################################
[monitor-instance-parameter]
monitor-httpd-port = 9687
[monitor-conf-parameters]
private-path-list +=
  $${directory:statistic}
  $${directory:status}
# Add parts generated by template
[buildout]
extends =
  ${monitor-template:output}
parts =
  dcron-service
  nginx-service
  nginx-listen-promise
  activate-crontab-file
  publish-global-rss
  monitor-base
{% for part in part_list -%}
{{ ' %s' % part }}
{% endfor -%}
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
# === software/backupserver/instance.cfg.in ===
[buildout]
parts =
  switch-softwaretype
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[dynamic-template-pullrdiffbackup]
recipe = slapos.recipe.template:jinja2
url = ${template-pullrdiffbackup:output}
output = $${buildout:parts-directory}/$${:_buildout_section_name_}/$${:filename}
filename = instance-pullrdiffbackup.cfg
extensions = jinja2.ext.do
# partition_ipv6 is the random ipv6 allocated to the local partition
context =
  key slave_instance_list slap-configuration:slave-instance-list
  key partition_ipv6 slap-configuration:ipv6-random
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
RootSoftwareInstance = $${:pullrdiffbackup}
pullrdiffbackup = dynamic-template-pullrdiffbackup:output
[slap-configuration]
# Fetches parameters defined in SlapOS Master for this instance.
# Always the same.
recipe = slapos.cookbook:slapconfiguration.serialised
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
# === software/backupserver/software.cfg ===
[buildout]
extends =
  buildout.hash.cfg
  ../../component/dash/buildout.cfg
  ../../component/dcron/buildout.cfg
  ../../component/logrotate/buildout.cfg
  ../../component/openssl/buildout.cfg
  ../../component/nginx/buildout.cfg
  ../../component/rdiff-backup/buildout.cfg
  ../../component/rsync/buildout.cfg
  ../../component/openssh/buildout.cfg
  ../../component/grep/buildout.cfg
  ../../component/findutils/buildout.cfg
  ../../component/util-linux/buildout.cfg
  ../../stack/slapos.cfg
  ../../stack/monitor/buildout.cfg
parts =
  dcron
  logrotate
  nginx
  openssl
  rsync
  template
  template-pullrdiffbackup
  template-backup-script
  template-crontab-line
  slapos-cookbook
[rssgen-eggs]
recipe = zc.recipe.egg
interpreter = python-${:_buildout_section_name_}
eggs =
  PyRSS2Gen
  python-dateutil
##########################################################
# Service startup scripts and configuration files
##########################################################
[template-nginx-service]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
[template-nginx-configuration]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
[template-dcron-service]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
[template-backup-script]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
[template-crontab-line]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
[template-crontab]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
[status2rss]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
[template-update-rss-script]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/${:filename}
##########################################################
# Buildout instance.cfg templates
##########################################################
[template-pullrdiffbackup]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/template-pullrdiffbackup.cfg
[template]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/template.cfg
[versions]
gunicorn = 19.1.1
# === software/backupserver/status2rss.py ===
import datetime
import uuid
import PyRSS2Gen
import sys
from dateutil.parser import parse
import base64
# Based on http://thehelpfulhacker.net/2011/03/27/a-rss-feed-for-your-crontabs/
# ### Defaults
TITLE = sys.argv[1]
LINK = sys.argv[2]
DESCRIPTION = TITLE
items = []
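# Each line read from stdin is expected to match what the backup script writes to
# its status log, i.e. "<time>, <statistic file>, <description>", where the
# description carries the hostname and the outcome, e.g. (hypothetical values):
#   2022-05-20T03:00:00+00:00, /srv/statistic/slave1_statistic.txt, host1, backup success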
while 1:
  try:
    line = sys.stdin.readline()
  except KeyboardInterrupt:
    break
  if not line:
    break
  time, statistic, desc = line.split(', ', 2)
  rss_item = PyRSS2Gen.RSSItem(
    title = desc,
    description = "<pre>%s</pre>" % "<br/>".join(
      ("%s, %s\nLatest statistic\n%s" % (time, desc,
        open(statistic).read())).split("\n")),
    pubDate = parse(time),
    guid = PyRSS2Gen.Guid(base64.b64encode("%s, %s" % (time, desc)), isPermaLink=0)
  )
  items.append(rss_item)
### Build the rss feed
rss_feed = PyRSS2Gen.RSS2(
  title = TITLE,
  link = LINK,
  description = DESCRIPTION,
  lastBuildDate = datetime.datetime.utcnow(),
  items = items
)
print rss_feed.to_xml()
# === software/backupserver/template-backup-script.sh.in ===
#!${dash-output:dash}
# Don't run if rdiff-backup is still running;
# in that case, the promise will detect that the previous rdiff-backup run took too long and report an error.
${util-linux:location}/bin/flock --nb $${:status_log}.lock true || exit
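# (The non-blocking flock above only probes the lock and exits early if a backup
# is already running; the same lock file is taken again below, blocking this time,
# around the actual rdiff-backup run.)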
export HOME=$${directory:home}
# Inform about beginning of backup
${coreutils-output:echo} "`${coreutils-output:date} --iso-8601=seconds -u`, $${:statistic_log}, $${:hostname}, backup running" > $${:status_log}
${coreutils-output:echo} "Available only if backup succeed." > $${:statistic_log}
# set -e
cd $${:datadirectory}
${util-linux:location}/bin/flock $${:status_log}.lock \
  ${rdiff-backup-1.0.5:location}/bin/rdiff-backup \
    $${:exclude_string} \
    --include='$${:include}' \
    --exclude='**' \
    --remote-schema '${openssh-output:ssh} -q -T -y -o "StrictHostKeyChecking no" -i $${:sshkey} -p $${:connection_port} %s $${:remote_schema}' \
    $${:connection}::/ ./
RESULT=$?
# Inform about backup status
if [ $RESULT -eq 0 ]
then
  ${coreutils-output:echo} "`${coreutils-output:date} --iso-8601=seconds -u`, $${:statistic_log}, $${:hostname}, backup success" >> $${:status_log}
  ${findutils-output:find} rdiff-backup-data/ -maxdepth 1 -name "session_statistic*" | ${coreutils-output:sort} | ${coreutils-output:tail} -n 1 | ${findutils-output:xargs} ${rdiff-backup-1.0.5:location}/bin/rdiff-backup --calculate-average > $${:statistic_log}
else
  ${coreutils-output:echo} "`${coreutils-output:date} --iso-8601=seconds -u`, $${:statistic_log}, $${:hostname}, backup failed" >> $${:status_log}
fi
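# Note: with the default remote_schema, the --remote-schema argument above expands
# to a command resembling (hypothetical key path and port):
#   ssh -q -T -y -o "StrictHostKeyChecking no" -i /path/to/.ssh/slave1 -p 22 %s rdiff-backup --server --restrict-read-only / -- "$@"
# where rdiff-backup substitutes %s with the remote host taken from the connection string.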
# === software/backupserver/template-crontab-line.in ===
# $${:_buildout_section_name_}
$${:frequency} $${:script}
# === software/backupserver/template-crontab.in ===
# min(0-59) hours(0-23) day(1-31) month(1-12) dow(0-7) command
MAILTO=admins@erp5.org
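# Example (hypothetical): "*/15 * * * * /path/to/etc/backup/slave1-backup-script"
# would pull a backup from the slave every 15 minutes.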
# === software/backupserver/template-dcron-service.sh.in ===
#!${dash-output:dash}
# BEWARE: This file is operated by slapos node
# BEWARE: It will be overwritten automatically
exec ${dcron-output:crond} \
  -s $${directory:cron-entries} \
  -c $${directory:crontabs} \
  -t $${directory:cronstamps} \
  -f -l 5 \
  -L $${dcron-service:logfile}
# -M cron_simplelogger
# === software/backupserver/template-nginx-service.sh.in ===
#!${dash-output:dash}
# BEWARE: This file is operated by slapos node
# BEWARE: It will be overwritten automatically
if [ ! -e $${nginx-configuration:ssl_crt} ]
then
  ${openssl-output:openssl} genrsa -out $${nginx-configuration:ssl_key} 2048
  ${openssl-output:openssl} req -new \
    -subj "/C=AA/ST=Denial/L=Nowhere/O=Dis/CN=$${nginx-configuration:ip}" \
    -key $${nginx-configuration:ssl_key} -out $${nginx-configuration:ssl_csr}
  ${openssl-output:openssl} x509 -req -days 365 \
    -in $${nginx-configuration:ssl_csr} \
    -signkey $${nginx-configuration:ssl_key} \
    -out $${nginx-configuration:ssl_crt}
fi
exec ${nginx-output:nginx} \
  -c $${nginx-configuration:output}
# === software/backupserver/template-nginx.cfg.in ===
daemon off;  # run in the foreground so supervisord can look after it
worker_processes 4;
pid $${directory:run}/nginx.pid;
events {
  worker_connections 768;
  # multi_accept on;
}
error_log $${nginx-configuration:error_log};
http {
  ##
  # Basic Settings
  ##
  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  server_tokens off;
  # server_names_hash_bucket_size 64;
  # server_name_in_redirect off;
  default_type application/octet-stream;
  include ${nginx-output:mime};
  ##
  # Logging Settings
  ##
  access_log $${nginx-configuration:access_log};
  error_log $${nginx-configuration:error_log};
  ##
  # Gzip Settings
  ##
  gzip on;
  gzip_disable "msie6";
  gzip_vary on;
  gzip_proxied any;
  gzip_comp_level 6;
  gzip_buffers 16 8k;
  gzip_http_version 1.1;
  gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
  server {
    listen [$${nginx-configuration:ip}]:$${nginx-configuration:port};
    ssl on;
    ssl_certificate $${nginx-configuration:ssl_crt};
    ssl_certificate_key $${nginx-configuration:ssl_key};
    fastcgi_temp_path $${directory:varnginx} 1 2;
    uwsgi_temp_path $${directory:varnginx} 1 2;
    scgi_temp_path $${directory:varnginx} 1 2;
    client_body_temp_path $${directory:varnginx} 1 2;
    proxy_temp_path $${directory:varnginx} 1 2;
    ## Only allow GET and HEAD request methods
    if ($request_method !~ ^(GET|HEAD)$ ) {
      return 444;
    }
    ## Serve an error 204 (No Content) for favicon.ico
    location = /favicon.ico {
      return 204;
    }
    location / {
      root $${directory:www};
      # index index.html;
    }
  }
}
# === software/backupserver/template-update-rss.sh.in ===
#!${dash-output:dash}
STATUS_DIR=$${directory:status}
RSS_DIR=$${directory:www}
TAIL=${coreutils-output:tail}
PYTHON=${buildout:bin-directory}/${rssgen-eggs:interpreter}
STATUS2RSS=${status2rss:output}
BASENAME=${coreutils-output:basename}
for status in $STATUS_DIR/*.txt
do
  NAME=`$BASENAME $status`
  $TAIL -n 1 -q $status | $PYTHON $STATUS2RSS "Backup status $NAME" "https://[$${nginx-configuration:ip}]:$${nginx-configuration:port}/$NAME.rss" > $RSS_DIR/$NAME.rss
done
$TAIL -n 1 -q $STATUS_DIR/*.txt | $PYTHON $STATUS2RSS "Full backup status $${:global_rss}" "https://[$${nginx-configuration:ip}]:$${nginx-configuration:port}/$${:global_rss}" > $RSS_DIR/$${:global_rss}
# === software/backupserver/test/README.md ===
Tests for backupserver software release
# === software/backupserver/test/setup.py ===
##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.backupserver'
long_description = open("README.md").read()
setup(name=name,
      version=version,
      description="Test for SlapOS' backupserver",
      long_description=long_description,
      long_description_content_type='text/markdown',
      maintainer="Nexedi",
      maintainer_email="info@nexedi.com",
      url="https://lab.nexedi.com/nexedi/slapos",
      packages=find_packages(),
      install_requires=[
        'slapos.core',
        'slapos.libnetworkcache',
        'erp5.util',
        'requests',
      ],
      zip_safe=True,
      test_suite='test',
      )
# === software/backupserver/test/test.py ===
##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import httplib
import json
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, InstanceTestCase = makeModuleSetUpAndTestCaseClass(
    os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))


class TestBackupServer(InstanceTestCase):
  def test(self):
    parameter_dict = self.computer_partition.getConnectionParameterDict()

    # Check that there is a RSS feed
    self.assertTrue('rss' in parameter_dict)
    self.assertTrue(parameter_dict['rss'].startswith(
      'https://[%s]:9443/' % (self._ipv6_address, )
    ))

    result = requests.get(
      parameter_dict['rss'], verify=False, allow_redirects=False)
    # XXX crontab not triggered yet
    self.assertEqual(
      [httplib.NOT_FOUND, False],
      [result.status_code, result.is_redirect]
    )

    # Check monitor
    self.assertTrue('monitor-base-url' in parameter_dict)
    self.assertTrue('monitor-setup-url' in parameter_dict)
    result = requests.get(
      parameter_dict['monitor-base-url'], verify=False, allow_redirects=False)
    self.assertEqual(
      [httplib.UNAUTHORIZED, False],
      [result.status_code, result.is_redirect]
    )
# === software/beremiz-ide/buildout.hash.cfg ===
# THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance]
filename = instance.cfg.in
md5sum = c4079d70ab3268234651bf6c36234b2f
[template-instance-beremiz]
filename = instance-beremiz.cfg.jinja2.in
md5sum = 51071494633f4ffba700baf935dc6955
[template-instance-beremiz-test]
filename = instance-beremiz-test.cfg.jinja2.in
md5sum = ff7cf06927041f6aec5ad559950b69cb
[template-fluxbox-menu.in]
filename = fluxbox-menu.in
md5sum = 09560314eae0225b6085f8626f1a603a
# === software/beremiz-ide/fluxbox-menu.in ===
[begin] (Fluxbox)
[encoding] {UTF-8}
  [exec] (Beremiz IDE) { {{ beremiz_bin }} }
  [exec] (Terminal) { {{ xterm_bin }} }
  [submenu] (Fluxbox menu)
    [config] (Configure)
    [submenu] (System Styles) {Choose a style...}
      [stylesdir] ({{ fluxbox_location }}/share/fluxbox/styles)
    [end]
    [submenu] (User Styles) {Choose a style...}
      [stylesdir] (~/.fluxbox/styles)
    [end]
    [workspaces] (Workspace List)
    [commanddialog] (Fluxbox Command)
    [reconfig] (Reload config)
    [restart] (Restart)
    [exec] (About) {(fluxbox -v; fluxbox -info | sed 1d) | xmessage -file - -center}
    [separator]
    [exit] (Exit)
  [end]
[endencoding]
[end]
# === software/beremiz-ide/instance-beremiz-test.cfg.jinja2.in ===
[directory]
tests = ${:srv}/tests
[xserver]
display-num = 42
resolution = 1920x1080x24
[wmctrl]
recipe = slapos.cookbook:wrapper
command-line = {{ wmctrl_bin }}
wrapper-path = ${directory:bin}/wmctrl
environment =
  XORG_LOCK_DIR=${xserver:lock-dir}
  DISPLAY=${xserver:display}
[xdotool]
recipe = slapos.cookbook:wrapper
command-line = {{ xdotool_bin }}
wrapper-path = ${directory:bin}/xdotool
environment =
  XORG_LOCK_DIR=${xserver:lock-dir}
  DISPLAY=${xserver:display}
[runTestSuite]
env.sh = ${beremiz-env.sh:output}
workdir = {{ nxdtest_dir }}
[beremiz-tests]
recipe = slapos.recipe.template
inline =
  #!/bin/sh -e
  cd {{ beremiz_location }}/tests
  testdir=$SLAPOS_TEST_LOG_DIRECTORY
  if [ -z "$testdir" ]; then
    testdir=${directory:tests}
  fi
  make test_dir=$testdir xserver_command='echo "Using ${xserver:display} on Slapos X Server !";' "$@"
output = ${directory:bin}/beremiztest
[sikulix]
recipe = slapos.cookbook:wrapper
command-line = {{ sikulix_bin }} -v
wrapper-path = ${directory:bin}/sikulix
environment =
  JAVA_TOOL_OPTIONS=-Duser.home=${buildout:directory} -Djava.io.tmpdir=${directory:tmp}
[libopencv_java430.so]
recipe = plone.recipe.command
opencv-link = ${buildout:directory}/.Sikulix/SikulixLibs/libopencv_java430.so
# We run SikuliX once with a random, non-existent project so that it initialises all
# files and extracts libopencv_java430.so from the jar. We can then replace it with
# the SlapOS-compiled lib, which solves GLIBC issues (version `GLIBC_2.27' not found).
command =
  LINK=${:opencv-link}
  if [ ! -e "$LINK" ]; then
    ${sikulix:wrapper-path} -r not_found_for_slapos.sikulix > /dev/null 2>&1 || true
  fi
  rm -f $LINK
  ln -sf {{ opencv_location }}/lib/libopencv_java430.so $LINK
update-command = ${:command}
stop-on-error = true
[buildout]
extends =
  {{ instance_nxdtest }}
  {{ instance_beremiz }}
parts +=
  sikulix
  wmctrl
  xdotool
  beremiz-tests
  runTestSuite
  libopencv_java430.so
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
# === software/beremiz-ide/instance-beremiz.cfg.jinja2.in ===
{% set part_list = [] -%}
{% set ipv6 = (ipv6 | list)[0] -%}
{% set ipv4 = (ipv4 | list)[0] -%}
[directory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
bin = ${buildout:directory}/bin
srv = ${buildout:directory}/srv
var = ${buildout:directory}/var
tmp = ${buildout:directory}/tmp
log = ${:var}/log
vnc = ${buildout:directory}/.vnc
scripts = ${:etc}/run
services = ${:etc}/service
promise = ${:etc}/promise
ssl = ${:etc}/ssl
auth = ${:tmp}/auth
workdir = ${:srv}/workdir
framebuffer = ${:srv}/framebuffer
fluxbox = ${buildout:directory}/.fluxbox
[gen-certificate]
recipe = plone.recipe.command
command = "{{ openssl_bin }}" req -newkey rsa -batch -new -x509 -days 3650 -nodes -keyout "${:key-file}" -out "${:cert-file}"
stop-on-error = true
cert-file = ${directory:ssl}/beremiz.crt
key-file = ${directory:ssl}/beremiz.key
[novnc-instance]
recipe = slapos.cookbook:novnc
path = ${directory:bin}/novnc
ip = {{ ipv6 }}
port = 6080
vnc-ip = {{ ipv4 }}
vnc-port = ${x11vnc:port}
novnc-location = {{ novnc_location }}
websockify-path = {{ websockify_bin }}
ssl-key-path = ${gen-certificate:key-file}
ssl-cert-path = ${gen-certificate:cert-file}
[websockify-sighandler]
recipe = slapos.cookbook:signalwrapper
wrapper-path = ${directory:bin}/websockify-sighandler
wrapped-path = ${novnc-instance:path}
[websockify-sighandler-service]
recipe = slapos.cookbook:wrapper
command-line = ${websockify-sighandler:wrapper-path}
wrapper-path = ${directory:services}/websockify
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
[novnc-promise]
<= monitor-promise-base
promise = check_socket_listening
name = novnc_promise.py
config-host = ${novnc-instance:ip}
config-port = ${novnc-instance:port}
[x11vnc]
recipe = slapos.cookbook:wrapper
port = 5901
command-line = {{ x11vnc_bin }} -forever -display ${xserver:display} -ncache 10
  -noxdamage -rfbport ${:port} -no6 -noipv6 -reopen -o ${directory:log}/x11vnc.log
  -usepw -rfbauth ${x11vnc-passwd:passfile} -desktop BeremizVNC
  -listen {{ ipv4 }} -xkb
wrapper-path = ${directory:services}/x11vnc
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
environment =
  XORG_LOCK_DIR=${directory:run}
wait-for-files =
  ${xserver:lock-file}
[x11vnc-listen-promise]
<= monitor-promise-base
promise = check_socket_listening
name = x11vnc_listening.py
config-host = {{ ipv4 }}
config-port = ${x11vnc:port}
[random-password]
recipe = slapos.cookbook:generate.password
storage-path = ${directory:etc}/.passwd
bytes = 8
[x11vnc-passwd]
recipe = slapos.recipe.template
passfile = ${directory:vnc}/passwd
inline =
  #!/bin/sh -e
  if [ -s "${:passfile}" ]; then
    echo "Password initialized.";
  else
    chmod 700 $(dirname ${:passfile});
    {{ x11vnc_bin }} -storepasswd ${random-password:passwd} ${:passfile};
  fi
output = ${directory:bin}/x11vnc_passwd
[generate-vnc-password]
recipe = plone.recipe.command
stop-on-error = true
command = ${x11vnc-passwd:output}
update-command = ${:command}
# Generate a fonts.conf file.
[font-config]
recipe = slapos.recipe.template:jinja2
url = {{ font_config_tmplt }}
output = ${directory:etc}/fonts.conf
context =
  key cachedir :cache-dir
  key fonts :fonts
  key includes :includes
cache-dir =
  ${directory:etc}/.fontconfig.cache
fonts =
  {{ font_dejavu }}
  {{ font_liberation }}
includes =
  {{ font_config_loc }}/etc/fonts/conf.d
[xserver]
recipe = slapos.cookbook:wrapper
command-line = {{ xvfb_bin }} ${:display} -screen 0 ${:resolution}x24
  -fbdir ${directory:framebuffer}
wrapper-path = ${directory:services}/xserver
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
environment =
  XORG_LOCK_DIR=${:lock-dir}
  FONTCONFIG_FILE=${font-config:output}
display-num = 0
display = :${:display-num}
resolution = {{ slapparameter_dict.get('screen-resolution', '1280x1024') }}
lock-dir = ${directory:run}
lock-file = ${:lock-dir}/.X${:display-num}-lock
[xserver-promise]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:promise}/xserver-is-up
command-line = bash -c "[ -S ${xserver:lock-dir}/.X11-unix/X${xserver:display-num} ]"
[fluxbox-menu]
recipe = slapos.recipe.template:jinja2
url = {{ fluxbox_menu }}
output = ${directory:fluxbox}/menu
context =
  key beremiz_bin beremiz-x11:output
  key xterm_bin xterm:output
  raw fluxbox_location {{ fluxbox_location }}
[fluxbox-toolbar]
recipe = plone.recipe.command
command =
  if [ ! -s "${directory:fluxbox}/init" ]; then
    echo "session.screen0.toolbar.placement: TopCenter" > ${directory:fluxbox}/init;
  fi
[fluxbox-bin]
recipe = slapos.cookbook:wrapper
command-line = {{ fluxbox_location }}/bin/fluxbox -display ${xserver:display}
wrapper-path = ${directory:bin}/fluxbox
environment =
  HOME=${buildout:directory}
  XORG_LOCK_DIR=${xserver:lock-dir}
  FONTCONFIG_FILE=${font-config:output}
  LANG=C.UTF-8
  LC_ALL=C.UTF-8
depends =
  ${fluxbox-menu:recipe}
  ${fluxbox-toolbar:recipe}
[fluxbox]
recipe = slapos.cookbook:wrapper
command-line = ${fluxbox-bin:wrapper-path} -log ${directory:log}/fluxbox.log
wrapper-path = ${directory:services}/fluxbox
[beremiz-env.sh]
recipe = slapos.recipe.template
inline =
  export LD_LIBRARY_PATH={{ mesa_location }}/lib
  export PATH=${directory:bin}:{{ git_bin_dir }}:{{ autoconf_bin }}:{{ automake_bin }}:{{ matiec_location }}/bin:{{ bison_location }}/bin:{{ flex_location }}/bin:{{ bin_directory }}:{{ gcc_location }}/bin:$PATH
  export XDG_DATA_DIR={{ gtk3_location }}/share
  export GSETTINGS_SCHEMA_DIR={{ gtk3_location }}/share/glib-2.0/schemas
  export FONTCONFIG_FILE=${font-config:output}
  export DISPLAY=${xserver:display}
  export XORG_LOCK_DIR=${xserver:lock-dir}
  export LANG=C.UTF-8
  export LC_ALL=C.UTF-8
  export BEREMIZPYTHONPATH={{ python_bin }}
output = ${directory:bin}/beremiz-env.sh
[beremiz-x11]
recipe = slapos.recipe.template
inline =
  #!/bin/sh -e
  . ${beremiz-env.sh:output}
  # wait a bit for xserver
  sleep 1
  exec {{ python_bin }} {{ beremiz_location }}/Beremiz.py
output = ${directory:bin}/beremiz-x11
[xterm]
recipe = slapos.recipe.template
inline =
  #!/bin/sh -e
  . ${beremiz-env.sh:output}
  export SHELL={{bash_bin}}
  exec {{ xterm_bin }}
output = ${directory:bin}/xterm
[request-vnc-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
slave = true
config-https-only = True
config-type = websocket
config-url = https://[${novnc-instance:ip}]:${novnc-instance:port}
return = secure_access domain
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
software-type = RootSoftwareInstance
name = Beremiz VNC
[publish-connection-information]
<= monitor-publish
recipe = slapos.cookbook:publish
backend-url = https://[${novnc-instance:ip}]:${novnc-instance:port}/vnc.html?host=[${novnc-instance:ip}]&port=${novnc-instance:port}&encrypt=1
url = ${request-vnc-frontend:connection-secure_access}/vnc.html?host=${request-vnc-frontend:connection-domain}&port=443&encrypt=1
vnc-password = ${random-password:passwd}
[buildout]
extends =
  {{ template_logrotate }}
  {{ ' ' ~ template_monitor }}
parts =
  monitor-base
  fluxbox
  novnc-promise
  xserver-promise
  x11vnc-listen-promise
  beremiz-x11
  websockify-sighandler-service
  request-vnc-frontend
  generate-vnc-password
  publish-connection-information
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
# === software/beremiz-ide/instance.cfg.in ===
[buildout]
parts = switch-softwaretype
eggs-directory = {{ buildout_egg_directory }}
develop-eggs-directory = {{ buildout_develop_directory }}
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = template-{{ instance_template_type }}:output
RootSoftwareInstance = ${:default}
test = template-beremiz-test:output
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/${:filename}
extensions = jinja2.ext.do
extra-context =
context =
  key develop_eggs_directory buildout:develop-eggs-directory
  key buildout_directory buildout:directory
  key eggs_directory buildout:eggs-directory
  key ipv4 slap-configuration:ipv4
  key ipv6 slap-configuration:ipv6
  key global_ipv4_prefix network-information:global-ipv4-network
  key slapparameter_dict slap-configuration:configuration
  key computer_id slap-configuration:computer
  raw bin_directory {{ bin_directory }}
  raw template_monitor {{ template_monitor_cfg }}
  raw template_logrotate {{ logrotate_cfg }}
  raw logrotate_cfg {{ logrotate_cfg }}
  raw python_bin {{ python_bin }}
  ${:extra-context}
[template-beremiz]
<= jinja2-template-base
url = {{ template_instance_beremiz }}
filename = instance-beremiz.cfg
extra-context =
  raw autoconf_bin {{ autoconf_location }}/bin
  raw automake_bin {{ automake_location }}/bin
  raw bash_bin {{ bash_location }}/bin/bash
  raw beremiz_location {{ beremiz_location }}
  raw bison_location {{ bison_location }}
  raw flex_location {{ flex_location }}
  raw fluxbox_location {{ fluxbox_location }}
  raw fluxbox_menu {{ template_fluxbox_menu }}
  raw font_config_loc {{ fontconfig_location }}
  raw font_config_tmplt {{ template_fonts_conf_target }}
  raw font_dejavu {{ font_dejavu }}
  raw font_liberation {{ font_liberation }}
  raw gcc_location {{ gcc_location }}
  raw git_bin_dir {{ git_location }}
  raw gtk3_location {{ gtk3_location }}
  raw matiec_location {{ matiec_location }}
  raw mesa_location {{ mesa_location }}
  raw novnc_location {{ novnc_location }}
  raw openssl_bin {{ openssl_location }}/bin/openssl
  raw websockify_bin {{ bin_directory }}/websockify
  raw x11vnc_bin {{ x11vnc_location }}/bin/x11vnc
  raw xvfb_bin {{ xserver_location }}/bin/Xvfb
  raw xterm_bin {{ xterm_location }}/bin/xterm
[template-beremiz-test]
<= jinja2-template-base
url = {{ template_instance_beremiz_test }}
filename = instance-beremiz-test.cfg
extra-context =
  key instance_beremiz template-beremiz:output
  raw beremiz_location {{ beremiz_location }}
  raw sikulix_bin {{ sikulix_bin }}
  raw xvfb_bin {{ xserver_location }}/bin/Xvfb
  raw wmctrl_bin {{ wmctrl_location }}/bin/wmctrl
  raw instance_nxdtest {{ nxdtest_template }}
  raw xdotool_bin {{ xdotool_location }}/bin/xdotool
  raw opencv_location {{ opencv_location }}
  raw nxdtest_dir {{ buildout_directory }}
# === software/beremiz-ide/software.cfg ===
[buildout]
extends =
  ../../stack/slapos.cfg
  ../../component/autoconf/buildout.cfg
  ../../component/flex/buildout.cfg
  ../../component/fonts/buildout.cfg
  ../../component/noVNC/buildout.cfg
  ../../component/nginx/buildout.cfg
  ../../component/lxml-python/buildout.cfg
  ../../component/numpy/buildout.cfg
  ../../component/numpy/openblas.cfg
  ../../component/matplotlib/buildout.cfg
  ../../component/wxpython/buildout.cfg
  ../../component/matiec/buildout.cfg
  ../../component/mesa/buildout.cfg
  ../../component/libvnc/buildout.cfg
  ../../component/open62541/buildout.cfg
  ../../component/sikuli/buildout.cfg
  ../../component/fluxbox/buildout.cfg
  ../../component/pygolang/buildout.cfg
  ../../component/xorg/buildout.cfg
  ../../component/pytest/buildout.cfg
  ../../component/opencv/buildout.cfg
  ../../component/xterm/buildout.cfg
  ../../stack/monitor/buildout.cfg
  ../../stack/nxdtest.cfg
  ./buildout.hash.cfg
parts +=
  slapos-cookbook
  beremiz-eggs
  open62541
  Modbus
  xterm
  instance
[gcc]
# Always build GCC for Fortran (see openblas).
max_version = 0
[python]
# Beremiz works with Python 2.7 for now; the code has not yet been ported to Python 3.
part = python2.7
[open62541]
# Beremiz needs it to be in the folder parts/open62541,
# as Beremiz looks for open62541 at BEREMIZ_PATH/../open62541.
shared = false
post-install =
  mkdir -p @@LOCATION@@/build/bin
  ln -sf @@LOCATION@@/lib/libopen62541.a @@LOCATION@@/build/bin/libopen62541.a
[twisted]
recipe = zc.recipe.egg:custom
egg = twisted
setup-eggs =
  six
  pathlib
  incremental
[beremiz-eggs]
recipe = zc.recipe.egg
eggs =
  ${wxPython:egg}
  ${python-cryptography:egg}
  ${lxml-python:egg}
  ${matplotlib:egg}
  future
  websockify
  zeroconf2
  enum34
  pyro
  ${twisted:egg}
  nevow
  autobahn
  pycountry
  fonttools
  ${beremiz-setup:egg}
  opcua
  msgpack
[python-interpreter]
eggs +=
  ${beremiz-eggs:eggs}
[beremiz]
recipe = slapos.recipe.build:download-unpacked
# download beremiz at revision 86b02aa32d413437ddcb9ab6cf4cc72aad394b3e
url = https://github.com/beremiz/beremiz/archive/86b02aa32d413437ddcb9ab6cf4cc72aad394b3e.tar.gz
md5sum = 0b46be8c8e849bd612373dc999427912
[beremiz-setup]
recipe = zc.recipe.egg:develop
egg = beremiz
setup = ${beremiz:location}
[download-template]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:filename}
[instance]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/instance.cfg
url = ${:_profile_base_location_}/${:filename}
python-bin = ${buildout:bin-directory}/${python-interpreter:interpreter}
type = beremiz
context =
  key autoconf_location autoconf:location
  key automake_location automake:location
  key bash_location bash:location
  key beremiz_location beremiz-setup:setup
  key bin_directory buildout:bin-directory
  key buildout_egg_directory buildout:eggs-directory
  key buildout_develop_directory buildout:develop-eggs-directory
  key buildout_directory buildout:directory
  key template_fluxbox_menu template-fluxbox-menu.in:target
  key template_fonts_conf_target template-fonts-conf:output
  key template_monitor_cfg monitor2-template:output
  key template_instance_beremiz template-instance-beremiz:target
  key template_instance_beremiz_test template-instance-beremiz-test:target
  key template_logrotate template-logrotate-base:output
  key fontconfig_location fontconfig:location
  key font_dejavu dejavu-fonts:location
  key font_liberation liberation-fonts:location
  key logrotate_cfg template-logrotate-base:output
  key bison_location bison:location
  key flex_location flex:location
  key fluxbox_location fluxbox:location
  key gcc_location gcc:prefix
  key git_location git:location
  key gtk3_location gtk-3:location
  key instance_template_type :type
  key matiec_location matiec:location
  key mesa_location mesa:location
  key novnc_location noVNC:location
  key nxdtest_template nxdtest-instance.cfg:output
  key python_bin :python-bin
  key opencv_location opencv:location
  key openssl_location openssl:location
  key sikulix_bin sikuli:output
  key xdotool_location xdotool:location
  key xserver_location xserver:location
  key xterm_location xterm:location
  key x11vnc_location x11vnc:location
  key wmctrl_location wmctrl:location
[template-instance-beremiz]
<= download-template
output = ${buildout:directory}/instance-beremiz.cfg.jinja2
[template-instance-beremiz-test]
<= download-template
output = ${buildout:directory}/instance-beremiz-test.cfg.jinja2
[template-fluxbox-menu.in]
<= download-template
output = ${buildout:directory}/fluxbox-menu.in
[versions]
Pillow = 6.2.2
matplotlib = 2.2.5
kiwisolver = 1.1.0
cycler = 0.10.0
websockify = 0.9.0
Pyro = 3.16
zeroconf2 = 0.19.2
cython = 0.29.24
sphinx = 1.8.5
doc2dash = 2.3.0
Twisted = 20.3.0
autobahn = 19.11.2
attrs = 19.2.0
Automat = 0.3.0
zope.interface = 4.4.2
Nevow = 0.14.5
PyHamcrest = 2.0.3
constantly = 15.1.0
hyperlink = 21.0.0
incremental = 21.3.0
future = 0.18.2
pycountry = 18.12.8
fonttools = 3.44.0
idna = 2.10
txaio = 18.8.1
characteristic = 14.3.0
typing = 3.10.0.0
ifcfg = 0.22
opcua = 0.98.13
futures = 3.3.0
trollius = 2.2.1
pathlib = 1.0.1
ddt = 1.4.4
# === software/beremiz-ide/test.cfg ===
[buildout]
extends =
  software.cfg
  ../../component/git/buildout.cfg
  ../../component/pytest/buildout.cfg
[beremiz-repository]
recipe = slapos.recipe.build:gitclone
repository = https://github.com/beremiz/beremiz
branch = wxPython4
location = ${buildout:parts-directory}/beremiz
git-executable = ${git:location}/bin/git
[beremiz-setup]
setup = ${beremiz-repository:location}
depends =
  ${beremiz-gen-nxdtest:recipe}
[ddt]
recipe = zc.recipe.egg:custom
egg = ddt
setup-eggs =
  enum34
[python-interpreter]
eggs +=
  ${pytest:eggs}
  pytest-timeout
  ${ddt:egg}
[instance]
type = beremiz-test
[gen-nxdtest.sh]
recipe = slapos.recipe.template
output = ${buildout:parts-directory}/gennxdtest.sh
nxdtest = ${buildout:directory}/.nxdtest
inline =
  #!/bin/sh -e
  cd ${beremiz-repository:location}/tests/ide_tests/
  testlist=$(ls -d *.sikuli)
  rm -f ${:nxdtest}
  for test in $testlist; do
    if [ -z "$test" ]; then
      continue;
    fi
    # beremiztest script is generated by the instance (call make test_dir=xxx xserver_command=xxx)
    cat <<EOF >> ${:nxdtest}
  TestCase(
    "$test",
    ['beremiztest', '$test'],
    cwd="""${beremiz-repository:location}/tests""",
    summaryf=UnitTest.summary,
  )
  EOF
  done
[beremiz-gen-nxdtest]
recipe = plone.recipe.command
command = ${gen-nxdtest.sh:output}
update-command = ${:command}
# === software/beremiz-ide/test/README.md ===
Beremiz IDE tests Software Release
# === software/beremiz-ide/test/setup.py ===
##############################################################################
#
# Copyright (c) 2022 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.beremiz_ide'
with open("README.md") as f:
long_description = f.read()
setup(name=name,
version=version,
description="Beremiz IDE test",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'requests',
],
zip_safe=True,
test_suite='test',
)
# === software/beremiz-ide/test/test.py ===
##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import unicode_literals
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
    os.path.abspath(
        os.path.join(os.path.dirname(__file__), '../software.cfg')))


class TestBeremizInstance(SlapOSInstanceTestCase):
  __partition_reference__ = 'B'

  @classmethod
  def getInstanceSoftwareType(cls):
    return 'default'

  def setUp(self):
    self.connection_parameters = self.computer_partition.getConnectionParameterDict()

  def test_url_get(self):
    resp = requests.get(self.connection_parameters['backend-url'], verify=False)
    self.assertEqual(requests.codes.ok, resp.status_code)
# === software/beremiz-runtime/buildout.hash.cfg ===
[instance-profile]
filename = instance.cfg.in
md5sum = 6e3e1dc304378640707cdb6a792106f1
# === software/beremiz-runtime/instance.cfg.in ===
#############################
#
# Deploy the Beremiz runtime instance
#
#############################
[buildout]
parts =
  publish-connection-parameter
  download-plc
  beremiz-runtime
#beremiz-runtime-promise
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
extends = {{ template_monitor }}
[download-plc]
recipe = slapos.recipe.build:download-unpacked
offline = false
url = ${instance-parameter:configuration.runtime_plc_url}
[instance-parameter]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
configuration.runtime_plc_url =
configuration.runtime_plc_md5sum =
configuration.autostart = 1
configuration.interface = 0.0.0.0
configuration.port = 61248
# Create all needed directories, depending on your needs
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
script = ${:etc}/run
service = ${:etc}/service
log = ${:var}/log
[beremiz-runtime]
logfile = ${directory:log}/beremiz-runtime.log
recipe = slapos.cookbook:wrapper
command-line =
  {{ buildout['bin-directory'] }}/pythonwitheggs {{ buildout['directory'] }}/parts/beremiz-source/Beremiz_service.py -a ${instance-parameter:configuration.autostart} -p ${instance-parameter:configuration.port} -i ${instance-parameter:configuration.interface} -x 1 ${directory:home}/parts/download-plc
wrapper-path = ${directory:service}/beremiz-runtime
[beremiz-runtime-promise]
<= monitor-promise-base
module = check_port_listening
name = beremiz-runtime.py
config-hostname = ${instance-parameter:configuration.interface}
config-port = ${instance-parameter:configuration.port}
[publish-connection-parameter]
recipe = slapos.cookbook:publish
port = ${instance-parameter:configuration.port}
interface = ${instance-parameter:configuration.interface}
# === software/beremiz-runtime/software.cfg ===
[buildout]
extends =
  buildout.hash.cfg
  ../../component/git/buildout.cfg
  ../../stack/monitor/buildout.cfg
  ../../stack/slapos.cfg
parts =
  beremiz-source
  slapos-cookbook
  instance-profile
  python-interpreter
[beremiz-source]
recipe = slapos.recipe.build:gitclone
repository = https://github.com/beremiz/beremiz.git
branch = default
git-executable = ${git:location}/bin/git
[beremiz]
recipe = zc.recipe.egg:develop
egg = beremiz
setup = ${beremiz-source:location}
[Twisted]
recipe = zc.recipe.egg:custom
egg = Twisted
setup-eggs =
six
pathlib
incremental
[python-interpreter]
recipe = zc.recipe.egg
interpreter = pythonwitheggs
eggs = click
prompt_toolkit
pygments
bitarray
future
six
Pyro
zeroconf-py2compat
pathlib
Nevow
msgpack
autobahn
${beremiz:egg}
${Twisted:egg}
[instance-profile]
recipe = slapos.recipe.template:jinja2
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/instance.cfg
extensions = jinja2.ext.do
context =
section buildout buildout
raw template_monitor ${monitor2-template:output}
# md5sum is fetched from buildout.hash.cfg and can be recalculated automatically by
# calling update-hash
[versions]
Twisted = 20.3.0
attrs = 19.2.0
Automat = 0.3.0
zope.interface = 4.4.2
Nevow = 0.14.5
PyHamcrest = 2.0.2
Pygments = 2.9.0
Pyro = 3.16
bitarray = 2.1.3
constantly = 15.1.0
future = 0.18.2
hyperlink = 21.0.0
incremental = 21.3.0
pathlib = 1.0.1
prompt-toolkit = 3.0.19
zeroconf-py2compat = 0.19.10
# Required by:
# Automat==0.3.0
characteristic = 14.3.0
# Required by:
# zeroconf-py2compat==0.19.10
ifcfg = 0.21
# Required by:
# hyperlink==21.0.0
typing = 3.10.0.0
autobahn = 19.11.2
txaio = 18.8.1
idna = 2.10
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/build-rina/build.cfg
[buildout]
extends =
../../component/vm-img/debian.cfg
parts =
debuild-rina-tools
test-rina
download-cache = download-cache
[vm-run-base]
smp = 4
# estimate the amount of RAM needed depending on the number of CPUs
mem = 2048
apt-sources =
apt-update =
for x in ${:apt-sources}
do echo deb [trusted=yes] file:`map $x` ./
done |sudo sh -c "cat >>/etc/apt/sources.list
apt-get update"
[debuild-rina-base]
<= vm-run-base
mount.rina = ${irati-stack:location}
cd-source =
git clone -snc core.sparseCheckout=true /mnt/rina rina
cd rina
echo /${:component} > .git/info/sparse-checkout
git reset --hard
cd ${:component}
# following lines are for debchange
export NAME=`git show -s --format=%cn`
export EMAIL=`git show -s --format=%ce`
dist=${:suite}
[ "$dist" ] && dist=`lsb_release -sc`/$dist || dist=UNRELEASED
prepare =
build-deps =
mk-build-deps -irs sudo -t 'apt-get -y'
build =
dpkg-buildpackage -uc -b -jauto
finalize =
df
cd ..
mv *.changes *.deb $PARTDIR
cd $PARTDIR
dpkg-scanpackages -m . /dev/null |gzip --best >Packages.gz
r=`apt-ftparchive release .`
echo "$r" > Release
command =
${:apt-update}
${:cd-source}
( ${:prepare}
${:build-deps}
${:build}
) 2>&1 |tee $PARTDIR/build.log
${:finalize}
[debuild-rina-kernel]
<= debuild-rina-base
component = linux
prepare =
debian/dch-snapshot --force-distribution -D $dist
sudo apt-get -y install kernel-wedge quilt python-six
QUILT_PATCHES=$PWD/debian/patches QUILT_PC=.pc quilt push --quiltrc - -a -q --fuzz=0
debian/rules debian/rules.gen || :
build =
arch=`dpkg-architecture -qDEB_HOST_ARCH`
fakeroot make -j${:smp} -f debian/rules.gen binary-arch_$${arch}_none binary-libc-dev_$${arch}
dpkg-genchanges -b -UBinary -UDescription \
>../linux_`dpkg-parsechangelog -S Version`_$${arch}.changes
[debuild-librina]
<= debuild-rina-base
component = librina
apt-sources = ${debuild-rina-kernel:location}
prepare =
debian/rules DIST=$dist
[debuild-rinad]
<= debuild-librina
component = rinad
apt-sources = ${debuild-librina:location}
[rina-tools-deps]
apt-sources = ${debuild-librina:location} ${debuild-rinad:location}
[debuild-rina-tools]
<= debuild-librina
rina-tools-deps
component = rina-tools
# check that the built packages install
finalize =
sudo dpkg -i ../*.deb
${debuild-librina:finalize}
[test-rina]
<= vm-run-base
rina-tools-deps
mount.download-cache = ${buildout:download-cache}
mount.rina = ${irati-stack:location}
mount.slapos = ${:_profile_base_location_}/../..
mount.slapos.package = ${slapos.package-repository:location}
commands = install test
install =
${:apt-update}
sudo apt-get -y install librinad-dev rinad
mkdir software
cd software
cat <<EOF >buildout.cfg
[buildout]
extends = /mnt/slapos/software/hellorina/software.cfg
develop = /mnt/slapos
download-cache = /mnt/download-cache
[slapos-cookbook-develop]
recipe =
setup =
[irati-stack]
repository = /mnt/rina
shared = true
[versions]
slapos.cookbook =
EOF
python -S /mnt/buildout/bin/buildout bootstrap
MAKEFLAGS=-j${:smp} bin/buildout
arch=`dpkg-architecture -qDEB_HOST_ARCH`
sudo sh -c "/mnt/slapos.package/playbook/roles/rina/gen-ipcm-conf
systemctl enable ipcm-re6st
dpkg -i `map ${debuild-rina-kernel:location}`/linux-image-*-$${arch}_*.deb"
reboot
test = ( set -x
# TODO: more tests
grep re6st/1// /sys/rina/ipcps/1/name
) 2>&1 |tee $PARTDIR/result
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/build-rina/instance-build-rina-input-schema.json
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters to build/publish RINA packages and test hellorina SR.",
"type": "object",
"additionalProperties": false,
"properties": {
"publish": {
"description": "Upload built packages automatically to a Debian repository when successful.",
"type": "object",
"required": [
"suite",
"host",
"key"
],
"properties": {
"suite": {
"type": "string"
},
"host": {
"type": "string"
},
"port": {
"type": "integer",
"default": 22
},
"key": {
"type": "string"
}
}
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/build-rina/runTestSuite.in
#!{{parameter_dict['runTestSuite_py']}}
from __future__ import print_function
import argparse, os, subprocess, sys, traceback
from time import gmtime, strftime, time
# These are the 2 modules to reuse when using ERP5 for managing test bots.
# What we do here is currently too new to reuse more from testsuite.
from erp5.util import taskdistribution
from erp5.util.testsuite import format_command
from zc.buildout.buildout import Buildout
{% set vm = parameter_dict['vm'] -%}
dist_list = {{vm['dists'].split()}}
publish = {{slapparameter_dict.get('publish')}}
# ERP5 must be changed to only distinguish SKIP/EXPECTED/UNEXPECTED,
# instead of SKIP/FAIL/ERROR. Unlike NEO, we categorize XFAIL as SKIP
# so that the overall status is PASS if there's no FAIL/XPASS/ERROR.
STAT_MAP = dict(
TOTAL = 'test_count',
PASS = None,
SKIP = 'skip_count',
XFAIL = 'skip_count',
FAIL = 'failure_count',
XPASS = 'failure_count',
ERROR = 'error_count',
)
class DummyTestResult:
class DummyTestResultLine:
def stop(self, duration, stdout='', **kw):
print('\n' + stdout)
print('Ran in %.3fs' % duration)
done = 0
def __init__(self, test_name_list):
self.test_name_list = test_name_list
def start(self):
test_result_line = self.DummyTestResultLine()
try:
test_result_line.name = self.test_name_list[self.done]
except IndexError:
return
self.done += 1
return test_result_line
def main():
os.environ.update({k: v.strip() % os.environ
for k, v in {{parameter_dict['environment'].items()}}})
parser = argparse.ArgumentParser(description='Run a test suite.')
parser.add_argument('--test_suite', help='The test suite name')
parser.add_argument('--test_suite_title', help='The test suite title')
parser.add_argument('--test_node_title', help='The test node title')
parser.add_argument('--project_title', help='The project title')
parser.add_argument('--revision', help='The revision to test',
default='dummy_revision')
parser.add_argument('--node_quantity', type=int,
help='Number of CPUs to use for the VM')
parser.add_argument('--master_url',
help='The Url of Master controling many suites')
args = parser.parse_args()
test_title = args.test_suite_title or args.test_suite
if args.master_url:
tool = taskdistribution.TaskDistributor(args.master_url)
test_result = tool.createTestResult(args.revision,
dist_list,
args.test_node_title,
test_title=test_title,
project_title=args.project_title)
if test_result is None:
return
else:
test_result = DummyTestResult(dist_list)
Buildout._setup_logging = lambda self: None
fd = os.open('buildout.cfg', os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0666)
try:
os.write(fd, """\
[buildout]
offline = true
relative-paths = true
""")
Buildout('buildout.cfg', {}).bootstrap(None)
os.write(fd, """\
extends = {{parameter_dict['profile_base_location']}}/build.cfg
develop-eggs-directory = {{buildout['develop-eggs-directory']}}
eggs-directory = {{buildout['eggs-directory']}}
download-cache = {{parameter_dict['download-cache']}}
[vm-run-base]
environment =
vm = {{vm['location']}}
[irati-stack]
location = {{parameter_dict['irati_stack']}}
[slapos.package-repository]
location = {{parameter_dict['slapos_package']}}
""")
finally:
os.close(fd)
librina_log = os.path.join('parts', 'debuild-librina', 'build.log')
stderr_write = sys.stderr.write
while 1:
test_result_line = test_result.start()
if not test_result_line:
break
dist = test_result_line.name
cmd = [os.path.join('bin', 'buildout'),
'vm-run-base:dist=' + dist,
'debuild-rina-base:suite=' + (publish['suite'] if publish else ''),
]
if args.node_quantity:
cmd.append('vm-run-base:smp=%s' % args.node_quantity)
status_dict = {'command': format_command(*cmd)}
print('$', status_dict['command'])
# Wanted on test result lines:
# status: UNKNOWN in case of buildout failure
# (even if the test suite could be run)
# output: test suite summary if any
# error: buildout traceback or test suite log
start = time()
try:
try:
p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
stderr = []
while 1:
line = p.stderr.readline()
if not line:
break
stderr_write(line)
stderr.append(line)
returncode = p.wait()
finally:
end = time()
del p
if returncode:
iter_err = enumerate(reversed(stderr), 1)
for i, line in iter_err:
if line == "Traceback (most recent call last):\n":
for i, line in iter_err:
if line == '\n':
break
for i, line in iter_err:
if line[0] != ' ':
break
break
if line == "While:\n":
del stderr[:-i]
status_dict['stderr'] = ''.join(stderr)
with open(librina_log) as f:
log = f.readlines()
del log[:log.index('make check-TESTS\n')]
for i, line in enumerate(log):
if line.startswith('Testsuite summary'):
del log[log.index(log[i+1], i+2):]
status_dict['stdout'] = ''.join(log[i:])
stat = {}
for line in log[i+2:]:
k, v = line[2:].split(':')
k = STAT_MAP[k]
if k:
stat[k] = stat.get(k, 0) + int(v.strip())
if not returncode:
status_dict.update(stat)
status_dict.setdefault('stderr', ''.join(log[:i-1]))
break
except Exception:
status_dict.setdefault('stderr', traceback.format_exc())
test_result_line.stop(
date = strftime("%Y/%m/%d %H:%M:%S", gmtime(end)),
duration = end - start,
**status_dict)
# TODO: upload packages if 'publish' parameter is given
if __name__ == "__main__":
main()
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/build-rina/software.cfg
[buildout]
extends =
../../stack/slapos.cfg
../../component/vm-img/debian.cfg
../../component/rina-tools/buildout.cfg
parts =
slapos-cookbook
template
download-cache = ${:directory}/download-cache
[template]
recipe = slapos.recipe.template:jinja2
# XXX: "template.cfg" is hardcoded in instanciation recipe
output = ${buildout:directory}/template.cfg
inline =
[buildout]
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
parts = runTestSuite
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
[directory]
recipe = slapos.cookbook:mkdirectory
bin = $${buildout:directory}/bin
[runTestSuite]
recipe = slapos.recipe.template:jinja2
output = $${directory:bin}/$${:_buildout_section_name_}
url = ${:_profile_base_location_}/$${:_buildout_section_name_}.in
context =
{##} section buildout buildout
{##} section parameter_dict runTestSuite-parameters
{##} key slapparameter_dict slap-configuration:configuration
[runTestSuite-parameters]
environment = {{dumps(environment)}}
vm = {{dumps(vm)}}
download-cache = ${buildout:download-cache}
runTestSuite_py = ${buildout:bin-directory}/${runTestSuite_py:interpreter}
profile_base_location = ${:_profile_base_location_}
irati_stack = ${irati-stack:location}
slapos_package = ${slapos.package-repository:location}
context =
section environment vm-run-environment
section vm vm-debian
[runTestSuite_py]
recipe = zc.recipe.egg
eggs = erp5.util
zc.buildout
interpreter = ${:_buildout_section_name_}
[irati-stack]
sparse-checkout = /.gitignore
# When run from erp5.util:testnode (and this is always the case), shared=true is
# added to this section and the path in .git/objects/info/alternates is outside
# the 'rina' mount point of the VM. Let's mount the testnode working copy
# directly. Comment the following 2 lines if 'repository' is a URL.
recipe =
location = ${:repository}
[slapos.package-repository]
recipe = slapos.recipe.build:gitclone
repository = https://lab.nexedi.com/nexedi/slapos.package.git
git-executable = ${git:location}/bin/git
sparse-checkout = /playbook/roles/rina
[vm-debian]
# building a generic Debian kernel uses a lot of space
size = 16Gi
dists = debian-jessie
packages +=
# generic (another SR that build packages automatically would use the same list)
apt-utils build-essential devscripts equivs lsb-release
# specific
git ca-certificates python
# biggest and common build-deps for RINA
dh-autoreconf pkg-config doxygen maven xmlto
# hellorina (shouldn't parts like lxml-python depend on the python of the SR?)
python-dev
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/buildout-testing/runTestSuite.in
#!{{runTestSuite_py}}
from __future__ import print_function
import argparse, os, re, subprocess, sys
from time import gmtime, strftime, time
from erp5.util import taskdistribution
from erp5.util.testsuite import SubprocessError, TestSuite
from pkg_resources import get_distribution
from zc.buildout.buildout import Buildout
if str is bytes:
str2bytes = lambda s: s
else:
str2bytes = lambda s: s.encode()
slapos_buildout = {{repr(slapos_buildout)}}
test_dict = {
'zc.buildout': slapos_buildout,
'zc.recipe.egg': os.path.join(slapos_buildout, 'zc.recipe.egg_'),
}
class DummyTestResult:
class DummyTestResultLine:
def stop(self, **kw):
pass
done = 0
def __init__(self, test_name_list):
self.test_name_list = test_name_list
def start(self):
test_result_line = self.DummyTestResultLine()
try:
test_result_line.name = self.test_name_list[self.done]
except IndexError:
return
self.done += 1
return test_result_line
class BuildoutTestSuite(TestSuite):
RUN_RE = re.compile(
      br'Ran (?P<all_tests>\d+) tests with'
      br' (?P<failures>\d+) failures,'
      br' (?P<errors>\d+) errors and'
      br' (?P<skips>\d+) skipped in')
def run(self, test):
start = time()
try:
status_dict = self.spawn(os.path.join('bin', 'zope-testrunner'),
'--test-path', os.path.join(test_dict[test], 'src'))
except SubprocessError as e:
status_dict = e.status_dict
end = time()
status_dict.update(
date = strftime("%Y/%m/%d %H:%M:%S", gmtime(end)),
duration = end - start)
search = self.RUN_RE.search(status_dict['stdout'])
if search:
groupdict = search.groupdict()
status_dict.update(
test_count = int(groupdict['all_tests']),
error_count = int(groupdict['errors']),
failure_count = int(groupdict['failures']),
skip_count = int(groupdict['skips']))
return status_dict
def main():
os.environ['TEMP'] = {{repr(temp_directory)}}
parser = argparse.ArgumentParser(description='Run a test suite.')
parser.add_argument('--test_suite', help='The test suite name')
parser.add_argument('--test_suite_title', help='The test suite title')
parser.add_argument('--test_node_title', help='The test node title')
parser.add_argument('--project_title', help='The project title')
parser.add_argument('--revision', help='The revision to test',
default='dummy_revision')
parser.add_argument('--master_url',
help='The Url of Master controling many suites')
args = parser.parse_args()
test_title = args.test_suite_title or args.test_suite
if args.master_url:
tool = taskdistribution.TaskDistributor(args.master_url)
test_result = tool.createTestResult(args.revision,
list(test_dict),
args.test_node_title,
test_title=test_title,
project_title=args.project_title)
if test_result is None:
return
else:
test_result = DummyTestResult(list(test_dict))
fd = os.open('buildout.cfg', os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o666)
try:
os.write(fd, str2bytes("""\
[buildout]
develop =%s
parts = testrunner
newest = false
versions = versions
[versions]
%s
zope.exceptions = 4.3
zope.interface = 4.7.2
zope.testing = 4.7
[testrunner]
recipe = zc.recipe.egg
eggs =
${:recipe}
zope.testing
zope.testrunner
scripts =
zope-testrunner
extra-paths =
%s
""" % (''.join('\n ' + x for x in test_dict.values()),
'\n'.join(x + ' =' for x in test_dict),
get_distribution('manuel').location,
)))
finally:
os.close(fd)
Buildout('buildout.cfg', {}).install(None)
test_suite = BuildoutTestSuite(1)
while 1:
test_result_line = test_result.start()
if not test_result_line:
break
test_result_line.stop(**test_suite.run(test_result_line.name))
if __name__ == "__main__":
main()
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/buildout-testing/software-py3.cfg
[buildout]
extends =
software.cfg
[python]
part = python3
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/buildout-testing/software.cfg
[buildout]
extends =
../../component/python-manuel/buildout.cfg
../../stack/slapos.cfg
parts =
slapos-cookbook
template
[slapos.buildout-repository]
recipe = slapos.recipe.build:gitclone
repository = https://lab.nexedi.com/nexedi/slapos.buildout.git
git-executable = ${git:location}/bin/git
[runTestSuite_py]
recipe = zc.recipe.egg
eggs = erp5.util
zc.buildout
${manuel:egg}
scripts = ${:interpreter}
interpreter = ${:_buildout_section_name_}
[template]
recipe = slapos.recipe.template
# XXX: "template.cfg" is hardcoded in instanciation recipe
output = ${buildout:directory}/template.cfg
inline =
[buildout]
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
parts = runTestSuite
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
[directory]
recipe = slapos.cookbook:mkdirectory
bin = $${buildout:directory}/bin
tmp = $${buildout:directory}/tmp
[slapos.buildout-repository]
recipe = slapos.recipe.build:gitclone
repository = ${slapos.buildout-repository:location}
git-executable = ${git:location}/bin/git
shared = true
[runTestSuite]
recipe = slapos.recipe.template:jinja2
output = $${directory:bin}/$${:_buildout_section_name_}
url = ${:_profile_base_location_}/$${:_buildout_section_name_}.in
context =
key slapparameter_dict slap-configuration:configuration
key slapos_buildout slapos.buildout-repository:location
key temp_directory directory:tmp
raw runTestSuite_py ${buildout:bin-directory}/${runTestSuite_py:interpreter}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/CHANGES.caddy_frontend.rst
Changes
=======
Here are listed the most important changes, which might affect upgrades.
1.0.XXX (XXXX-XX-XX)
--------------------
* fix: exposed log file names are stabilised
* feature: more information is provided in case an instance is not found
* feature: telemetry is fully disabled
* feature: Apache Traffic Server 8.0 is used
* feature: backend-haproxy statistic for haproxy's frontend is available
* fix: slave publication has been fixed in case of mixed case slave reference
* feature: running test/test.py results in starting the backend used in tests
* fix: automatic caucase-updater usage has been fixed
* fix/workaround: reconnect to backend-haproxy from Caddy and Apache Traffic Server
* fix/feature: explicitly use Apache Traffic Server's simulation of stale-if-error, as in reality Apache Traffic Server does not support it
* feature: dropped unused parameters
* feature: Strict-Transport-Security aka HSTS
* fix: use kedifa with a fix for files with multiple CAs
* feature: support query string (the characters after ? in the url) in url and https-url
* fix: unique acl names fix a rare bug of directing traffic to https-url instead of url, or the other way around
* feature: failover backend
1.0.164 (2020-09-24)
--------------------
* feature: serve a stale result up to 1 day if the origin server is down
* feature: request real frontend for slave introspection (aka log access)
* fix: Kedifa reloading, which was resulting in the kedifa server disallowing access after some time
* feature: allow setting the software release for each node, instead of for the whole cluster
* fix: haproxy matches the correct hostname in case of wildcards, instead of using the wildcard host rather than the specific one
1.0.160 (2020-08-25)
--------------------
* haproxy updated from 2.0.15 to 2.0.17 in order to fix an issue while accessing inaccessible backends
1.0.159 (2020-07-30)
--------------------
* logs are ensured to be available in slave's ``log-access-url``
* logs from backend Haproxy are also available to slaves
1.0.158 (2020-07-24)
--------------------
* manual customisation of profiles has been dropped, as it was not used; dropped keys are ``apache_custom_http``, ``apache_custom_https``, ``caddy_custom_http``, ``caddy_custom_https`` from slaves and ``-frontend-authorized-slave-string`` from master
* ``re6st-optimal-test`` has been dropped from slave
* QUIC is dropped, as it was not used and has been superseded by HTTP/3; dropped key is ``enable-quic`` from master
* haproxy is used as a gateway to backends:
* ``automatic-internal-backend-client-caucase-csr`` switch for master is introduced to control its CSR signing
* ``proxy-try-duration`` and ``proxy-try-interval`` have been dropped, as Caddy is not used anymore to connect to the backend; instead ``backend-connect-timeout`` and ``backend-connect-retries`` are used, as they come from Haproxy
* ``backend-client-caucase-url`` is returned in master and slave, so that backends can use caucase to fetch CA from frontend cluster
* ``request-timeout`` is supported per slave, as it is now possible
* ``authenticate-to-backend`` is added for master and slave, defaulting to False, to have control over cluster default authentication, and make it possible to do it per slave
1.0.149 (2020-05-05)
--------------------
* no changes noted
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/README.caddy_frontend.rst
==============
Caddy Frontend
==============
Frontend system using Caddy, based on the apache-frontend software release, allowing URLs like myinstance.myfrontenddomainname.com to be rewritten and proxied to the real IP/URL of myinstance.
Caddy Frontend works using the master instance / slave instance design. It means that a single main instance of Caddy will be used to act as frontend for many slaves.
This documentation covers only specific scenarios. Most of the parameters are described in `software.cfg.json `_.
Software type
=============
Caddy frontend is available in 4 software types:
* ``default`` : The standard way to use the Caddy frontend configuring everything with a few given parameters
* ``custom-personal`` : This software type allows each slave to edit its Caddy configuration file
* ``default-slave`` : XXX
* ``custom-personal-slave`` : XXX
About frontend replication
==========================
Slaves of the root instance are sent as a parameter to the requested frontends, which will process them. The only difference is that they will then return the would-be published information to the root instance instead of publishing it. The root instance will then do a synthesis and publish the information to its slaves. The replicate instance only uses 5 types of parameters for itself and will transmit the rest to the requested frontends.
These parameters are:
* ``-frontend-type`` : the type to deploy frontends with (defaults to "default")
* ``-frontend-quantity`` : the quantity of frontends to request (defaults to "1")
* ``-frontend-i-state``: the state of frontend i
* ``-frontend-i-software-release-url``: software release to be used for frontends; defaults to the current software release
* ``-frontend-config-i-foo``: Frontend i will be requested with parameter foo
* ``-sla-i-foo`` : where "i" is the number of the concerned frontend (between 1 and "-frontend-quantity") and "foo" a sla parameter.
For example::
<parameter id="-frontend-quantity">3</parameter>
<parameter id="-frontend-type">custom-personal</parameter>
<parameter id="-frontend-2-state">stopped</parameter>
<parameter id="-sla-3-computer_guid">COMP-1234</parameter>
<parameter id="-frontend-software-release-url">https://lab.nexedi.com/nexedi/slapos/raw/someid/software/caddy-frontend/software.cfg</parameter>
will request the third frontend on COMP-1234 and with SR https://lab.nexedi.com/nexedi/slapos/raw/someid/software/caddy-frontend/software.cfg. All frontends will be of software type ``custom-personal``. The second frontend will be requested with the state stopped.
*Note*: the way slaves are transformed to a parameter avoids modifying more than 3 lines in the frontend logic.
**Important NOTE**: The way you request a slave of a replicate frontend is the same as the one you would use for the software given in "-frontend-quantity". Do not forget to use "replicate" for the software type. XXXXX So far it is not possible to do a simple request on a replicate frontend if you do not know the software_guid or other sla-parameters of the master instance. In fact we do not know yet the software type of the "requested" frontends. TO BE IMPLEMENTED
XXX Should be moved to specific JSON File
Extra-parameter per frontend with default::
ram-cache-size = 1G
disk-cache-size = 8G
How to deploy a frontend server
===============================
This is to deploy an entire frontend server with a public IPv4. If you want to use an already deployed frontend to make your service available via ipv4, switch to the "Example" parts.
First, you will need to request a "master" instance of Caddy Frontend with:
* A ``domain`` parameter where the frontend will be available
like::
moulefrite.org
Then, it is possible to request many slave instances (currently only from slapconsole, UI doesn't work yet) of Caddy Frontend, like::
instance = request(
software_release=caddy_frontend,
partition_reference='frontend2',
shared=True,
partition_parameter_kw={"url":"https://[1:2:3:4]:1234/someresource"}
)
Those slave instances will be redirected to the "master" instance, and you will see on the "master" instance the associated proper directives of all slave instances.
Finally, the slave instance will be accessible from: https://someidentifier.moulefrite.org.
About SSL and SlapOS Master Zero Knowledge
==========================================
**IMPORTANT**: One Caddy cannot serve more than one specific SSL site and be compatible with obsolete browsers (e.g. IE8). See http://wiki.apache.org/httpd/NameBasedSSLVHostsWithSNI
SSL keys and certificates are sent directly to the frontend cluster in order to follow the zero-knowledge principle of SlapOS Master.
*Note*: Until a master partition or slave specific certificate is uploaded, each slave is served with a fallback certificate. This fallback certificate is self-signed, does not match the served hostname and results in a lack of response over HTTPS.
Obtaining CA for KeDiFa
-----------------------
KeDiFa uses caucase, so it is required to obtain the caucase CA certificate used to sign the KeDiFa SSL certificate, in order to be sure that certificates are sent to a valid KeDiFa.
The easiest way to do so is to use caucase.
On some secure and trusted box which will be used to upload certificates to the master or slave frontend partition, install caucase: https://pypi.org/project/caucase/
Master and slave partitions will return the key ``kedifa-caucase-url``; with it, create and start a ``caucase-updater`` service::
caucase-updater \
--ca-url "${kedifa-caucase-url}" \
--cas-ca "${frontend_name}.caucased.ca.crt" \
--ca "${frontend_name}.ca.crt" \
--crl "${frontend_name}.crl"
where ``frontend_name`` is a frontend cluster to which you will upload the certificate (it can be just one slave).
Make sure it is automatically started when the trusted machine reboots: you want to have it running so you can forget about it. It will keep KeDiFa's CA certificate up to date when it gets renewed, so you know you are still talking to the same service as when you previously uploaded the certificate, up to the original upload.
Master partition
----------------
After requesting the master partition, it will return ``master-key-generate-auth-url`` and ``master-key-upload-url``.
Doing an HTTP GET on ``master-key-generate-auth-url`` will return an authentication token, which is used to communicate with ``master-key-upload-url``. This token shall be stored securely.
By doing an HTTP PUT to ``master-key-upload-url`` with the authentication token appended, it is possible to upload a PEM bundle of the certificate, key and any accompanying CA certificates to the master.
An example session is::
request(...)
curl -g -X GET --cacert "${frontend_name}.ca.crt" --crlfile "${frontend_name}.crl" master-key-generate-auth-url
> authtoken
cat certificate.pem ca.pem key.pem > bundle.pem
curl -g --upload-file bundle.pem --cacert "${frontend_name}.ca.crt" --crlfile "${frontend_name}.crl" master-key-upload-url+authtoken
This replaces old request parameters:
* ``apache-certificate``
* ``apache-key``
* ``apache-ca-certificate``
(*Note*: They are still supported for backward compatibility, but any value sent to the ``master-key-upload-url`` will supersede information from SlapOS Master.)
Slave partition
---------------
After requesting the slave partition, it will return ``key-generate-auth-url`` and ``key-upload-url``.
Doing an HTTP GET on ``key-generate-auth-url`` will return an authentication token, which is used to communicate with ``key-upload-url``. This token shall be stored securely.
By doing an HTTP PUT to ``key-upload-url`` with the authentication token appended, it is possible to upload a PEM bundle of the certificate, key and any accompanying CA certificates to the master.
An example session is::
request(...)
curl -g -X GET --cacert "${frontend_name}.ca.crt" --crlfile "${frontend_name}.crl" key-generate-auth-url
> authtoken
cat certificate.pem ca.pem key.pem > bundle.pem
curl -g --upload-file bundle.pem --cacert "${frontend_name}.ca.crt" --crlfile "${frontend_name}.crl" key-upload-url+authtoken
This replaces old request parameters:
* ``ssl_crt``
* ``ssl_key``
* ``ssl_ca_crt``
(*Note*: They are still supported for backward compatibility, but any value sent to the ``key-upload-url`` will supersede information from SlapOS Master.)
Instance Parameters
===================
Master Instance Parameters
--------------------------
The parameters for instances are described at `instance-caddy-input-schema.json `_.
Here is some additional information about the parameters listed below:
domain
~~~~~~
Name of the domain to be used (example: mydomain.com). Sub domains of this domain will be used for the slave instances (example: instance12345.mydomain.com). It is then recommended to add a wildcard in DNS for the sub domains of the chosen domain like::
*.mydomain.com. IN A 123.123.123.123
Using the IP given by the Master Instance. "domain" is a mandatory parameter.
port
~~~~
Port used by Caddy. Optional parameter, defaults to 4443.
plain_http_port
~~~~~~~~~~~~~~~
Port used by Caddy to serve plain http (only used to redirect to https).
Optional parameter, defaults to 8080.
Slave Instance Parameters
-------------------------
The parameters for instances are described at `instance-slave-caddy-input-schema.json `_.
Here is some additional information about the parameters listed below:
path
~~~~
Only used if type is "zope".
Will append the specified path to the "VirtualHostRoot" of the zope's VirtualHostMonster.
"path" is an optional parameter, ignored if not specified.
Example of value: "/erp5/web_site_module/hosting/"
url
~~~
URL of the backend to use. Optional, but its absence will result in a non-functioning slave.
Example: http://mybackend.com/myresource
enable_cache
~~~~~~~~~~~~
Enables HTTP cache, optional.
health-check-*
~~~~~~~~~~~~~~
This set of parameters is used to control how the backend checks are done. Such active checks can be really useful for the `stale-if-error` caching technique, especially in case the backend is very slow to reply or to connect to.
`health-check-http-method` can be used to configure the HTTP method used to check the backend. The special method `CONNECT` can be used to check only the connection attempt.
Please be aware that `health-check-timeout` is really short by default, so in case `/` of the backend is slow to reply, configure a proper path with `health-check-http-path` so that such a backend is not marked down too fast, before increasing the check timeout.
Thanks to health checks it is possible to configure a failover system. By providing `health-check-failover-url` or `health-check-failover-https-url`, some special backend can be used to reply in case the original backend replies with an error (codes like `5xx`). As a note, one can set up this failover URL like `https://failover.example.com/?p=` so that the path from the incoming request will be passed as a parameter. Additionally, authentication to the failover URL is supported with `health-check-authenticate-to-failover-backend`, and SSL Proxy verification with `health-check-failover-ssl-proxy-verify` and `health-check-failover-ssl-proxy-ca-crt`.
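For illustration, here is a hedged sketch of a slave request using these health-check parameters; the parameter names come from the list above, while all values and URLs are purely illustrative assumptions::

  instance = request(
      software_release=caddy_frontend,
      partition_reference='my frontend',
      shared=True,
      partition_parameter_kw={
          "url":"https://[1:2:3:4:5:6:7:8]:1234",
          # check a cheap dedicated path instead of the default /
          "health-check-http-method":"GET",
          "health-check-http-path":"/health",
          # special backend replying when the original backend errors out
          "health-check-failover-url":"https://failover.example.com/?p=",
      }
  )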
Examples
========
Here are some example of how to make your SlapOS service available through an already deployed frontend.
Simple Example (default)
------------------------
Request slave frontend instance so that https://[1:2:3:4:5:6:7:8]:1234 will be
redirected and accessible from the proxy::
instance = request(
software_release=caddy_frontend,
software_type="RootSoftwareInstance",
partition_reference='my frontend',
shared=True,
partition_parameter_kw={
"url":"https://[1:2:3:4:5:6:7:8]:1234",
}
)
Zope Example (default)
----------------------
Request slave frontend instance using a Zope backend so that
https://[1:2:3:4:5:6:7:8]:1234 will be redirected and accessible from the
proxy::
instance = request(
software_release=caddy_frontend,
software_type="RootSoftwareInstance",
partition_reference='my frontend',
shared=True,
partition_parameter_kw={
"url":"https://[1:2:3:4:5:6:7:8]:1234",
"type":"zope",
}
)
Advanced example
-----------------
Request slave frontend instance using a Zope backend, with Varnish activated,
listening to a custom domain and redirecting to /erp5/ so that
https://[1:2:3:4:5:6:7:8]:1234/erp5/ will be redirected and accessible from
the proxy::
instance = request(
software_release=caddy_frontend,
software_type="RootSoftwareInstance",
partition_reference='my frontend',
shared=True,
partition_parameter_kw={
"url":"https://[1:2:3:4:5:6:7:8]:1234",
"enable_cache":"true",
"type":"zope",
"path":"/erp5",
"domain":"mycustomdomain.com",
}
)
Simple Example
---------------
Request slave frontend instance so that https://[1:2:3:4:5:6:7:8]:1234 will be redirected and accessible from the proxy::
instance = request(
    software_release=caddy_frontend,
    partition_reference='my frontend',
    shared=True,
    software_type="custom-personal",
    partition_parameter_kw={
        "url":"https://[1:2:3:4:5:6:7:8]:1234",
    }
)
Simple Cache Example - XXX - to be written
------------------------------------------
Request slave frontend instance so that https://[1:2:3:4:5:6:7:8]:1234 will be redirected and accessible from the proxy, with cache enabled::
instance = request(
    software_release=caddy_frontend,
    partition_reference='my frontend',
    shared=True,
    software_type="custom-personal",
    partition_parameter_kw={
        "url":"https://[1:2:3:4:5:6:7:8]:1234",
        "domain": "www.example.org",
        "enable_cache": "True",
    }
)
Advanced example - XXX - to be written
--------------------------------------
Request slave frontend instance using a custom apache configuration, using cache and ssl certificates.
Listening to a custom domain and redirecting to /erp5/ so that
https://[1:2:3:4:5:6:7:8]:1234/erp5/ will be redirected and accessible from
the proxy::
instance = request(
software_release=caddy_frontend,
partition_reference='my frontend',
shared=True,
software_type="custom-personal",
partition_parameter_kw={
    "url":"https://[1:2:3:4:5:6:7:8]:1234",
    "enable_cache":"true",
    "type":"zope",
    "path":"/erp5",
    "domain":"example.org",
    "ssl_key":"""-----BEGIN RSA PRIVATE KEY-----
XXXXXXX..........XXXXXXXXXXXXXXX
-----END RSA PRIVATE KEY-----""",
    "ssl_crt":'''-----BEGIN CERTIFICATE-----
XXXXXXXXXXX.............XXXXXXXXXXXXXXXXXXX
-----END CERTIFICATE-----''',
    "ssl_ca_crt":'''-----BEGIN CERTIFICATE-----
XXXXXXXXX...........XXXXXXXXXXXXXXXXX
-----END CERTIFICATE-----''',
    "ssl_csr":'''-----BEGIN CERTIFICATE REQUEST-----
XXXXXXXXXXXXXXX.............XXXXXXXXXXXXXXXXXX
-----END CERTIFICATE REQUEST-----''',
}
)
Promises
========
Note that in some cases promises will fail:
* not possible to request frontend slave for monitoring (monitoring frontend promise)
* no slaves present (configuration promise and others)
* no cached slave present (configuration promise and others)
This is a known issue and shall be tackled soon.
KeDiFa
======
An additional partition with KeDiFa (Key Distribution Facility) is by default requested on the same computer as the master frontend partition.
By adding request keys like ``-sla-kedifa-`` it is possible to provide SLA information for the kedifa partition. E.g. to put it on computer ``couscous`` it shall be ``-sla-kedifa-computer_guid: couscous``.
Also ``-kedifa-software-release-url`` can be used to override the software release for the kedifa partition.
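For example, a minimal sketch of such a request, pinning the kedifa partition to the illustrative computer ``couscous`` mentioned above::

  instance = request(
      software_release=caddy_frontend,
      partition_reference='my frontend cluster',
      partition_parameter_kw={
          # SLA key for the kedifa partition, as described above
          "-sla-kedifa-computer_guid":"couscous",
      }
  )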
Notes
=====
It is not possible with slapos to listen to ports <= 1024, because processes
are not run as root.
Solution 1 (iptables)
---------------------
It is a good idea then to go to the node where the instance is
and set some ``iptables`` rules like (if using default ports)::
iptables -t nat -A PREROUTING -p tcp -d {public_ipv4} --dport 443 -j DNAT --to-destination {listening_ipv4}:4443
iptables -t nat -A PREROUTING -p tcp -d {public_ipv4} --dport 80 -j DNAT --to-destination {listening_ipv4}:8080
ip6tables -t nat -A PREROUTING -p tcp -d {public_ipv6} --dport 443 -j DNAT --to-destination {listening_ipv6}:4443
ip6tables -t nat -A PREROUTING -p tcp -d {public_ipv6} --dport 80 -j DNAT --to-destination {listening_ipv6}:8080
Where ``{public_ipv[46]}`` is the public IP of your server, or at least the LAN IP to where your NAT will forward to, and ``{listening_ipv[46]}`` is the private ipv4 (like 10.0.34.123) that the instance is using and sending as connection parameter.
Additionally, in order to access the server from itself, such entries are needed in the ``OUTPUT`` chain (as the internal packets won't appear in the ``PREROUTING`` chain)::
iptables -t nat -A OUTPUT -p tcp -d {public_ipv4} --dport 443 -j DNAT --to {listening_ipv4}:4443
iptables -t nat -A OUTPUT -p tcp -d {public_ipv4} --dport 80 -j DNAT --to {listening_ipv4}:8080
ip6tables -t nat -A OUTPUT -p tcp -d {public_ipv6} --dport 443 -j DNAT --to {listening_ipv6}:4443
ip6tables -t nat -A OUTPUT -p tcp -d {public_ipv6} --dport 80 -j DNAT --to {listening_ipv6}:8080
Solution 2 (network capability)
-------------------------------
It is also possible to directly allow the service to listen on 80 and 443 ports using the following command::
setcap 'cap_net_bind_service=+ep' /opt/slapgrid/$CADDY_FRONTEND_SOFTWARE_RELEASE_MD5/go.work/bin/caddy
setcap 'cap_net_bind_service=+ep' /opt/slapgrid/$CADDY_FRONTEND_SOFTWARE_RELEASE_MD5/parts/6tunnel/bin/6tunnel
Then specify in the master instance parameters (see the sketch after this list):
* set ``port`` to ``443``
* set ``plain_http_port`` to ``80``
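Put together, a hedged sketch of the corresponding master instance request; ``domain`` is mandatory as documented above and its value is illustrative::

  instance = request(
      software_release=caddy_frontend,
      partition_reference='my frontend server',
      partition_parameter_kw={
          "domain":"mydomain.com",
          # ports reachable directly thanks to the network capability
          "port":"443",
          "plain_http_port":"80",
      }
  )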
Authentication to the backend
=============================
The cluster generates a CA served by caucase, available with the ``backend-client-caucase-url`` return parameter.
Then, each slave configured with ``authenticate-to-backend`` set to true will use a certificate signed by this CA while accessing an https backend (a request sketch follows the list below).
This allows backends to:
* restrict access only from some frontend clusters
* trust values (like ``X-Forwarded-For``) sent by the frontend
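As an illustration, a minimal slave request sketch enabling this authentication; the backend URL is an assumption, and the backend still has to trust the CA fetched from ``backend-client-caucase-url`` on its side::

  instance = request(
      software_release=caddy_frontend,
      partition_reference='my frontend',
      shared=True,
      partition_parameter_kw={
          "url":"https://[1:2:3:4:5:6:7:8]:1234",
          # present a certificate signed by the cluster CA to the backend
          "authenticate-to-backend":"true",
      }
  )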
Technical notes
===============
Profile development guidelines
------------------------------
Keep the naming in instance profiles:
* ``software_parameter_dict`` for values coming from software
* ``instance_parameter_dict`` for **local** values generated by the instance, except ``configuration``
* ``slapparameter_dict`` for values coming from SlapOS Master
Instantiated cluster structure
------------------------------
Instantiating caddy-frontend results in a cluster spanning various partitions:
* master (the controlling one)
* kedifa (contains kedifa server)
* caddy-frontend-N, which contains the running processes to serve sites - this partition can be replicated via the ``-frontend-quantity`` parameter
It means sites are served in the ``caddy-frontend-N`` partition, and this partition is structured as:
* Caddy serving the browser [client-facing-caddy]
* (optional) Apache Traffic Server for caching [ats]
* Haproxy as a way to communicate to the backend [backend-facing-haproxy]
* some other additional tools (6tunnel, monitor, etc)
In case of slaves without cache (``enable_cache = False``) the request will travel as follows::
client-facing-caddy --> backend-facing-haproxy --> backend
In case of slaves using cache (``enable_cache = True``) the request will travel as follows::
client-facing-caddy --> ats --> backend-facing-haproxy --> backend
Usage of Haproxy as a relay to the backend allows much better control of the backend, removes the hassle of checking the backend from Caddy and allows future developments like client SSL certificates to the backend or even health checks.
Kedifa implementation
---------------------
`Kedifa `_ server runs on kedifa partition.
Each `caddy-frontend-N` partition downloads certificates from the kedifa server.
Caucase (exposed by ``kedifa-caucase-url`` in master partition parameters) is used to handle certificates for authentication to kedifa server.
If ``automatic-internal-kedifa-caucase-csr`` is enabled (by default it is), scripts run on the master partition to simulate a human signing certificates for each caddy-frontend-N node.
Support for X-Real-Ip and X-Forwarded-For
-----------------------------------------
X-Forwarded-For and X-Real-Ip are transmitted to the backend, but only for IPv4 access to the frontend. In case of IPv6 access, the provided IP will be wrong, because of using 6tunnel.
Automatic Internal Caucase CSR
------------------------------
The cluster is composed of many instances, which land on separate partitions, so some way is needed to bootstrap trust between the partitions.
There are two ways to achieve it:
* use the default, Automatic Internal Caucase CSR, which replaces a human signing CSRs against the internal caucases during automatic bootstrap; this leads to some issues, described later
* switch to manual bootstrap, which requires a human to create and manage a user certificate (with caucase-updater) and then sign new frontend nodes appearing in the system
The issues during automatic bootstrap are:
* a rogue or hacked SlapOS Master can result in rogue frontend nodes being added to the cluster, which will be trusted, so it will be possible to fetch all certificates and keys from Kedifa or to log in to backends
* when a new node is added there is a short window during which a rogue person is able to trick the automatic signing and have their own node added
In both cases promises will fail on the node which is not able to get signed, but in the case of Kedifa the damage has already happened (certificates and keys are compromised). So if the cluster administrator wants to stay on the safe side, both automatic bootstraps shall be turned off (see the sketch below).
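A minimal sketch of a master request turning both automatic bootstraps off; both switch names appear elsewhere in this document, and the boolean value format is an assumption following the other parameters::

  instance = request(
      software_release=caddy_frontend,
      partition_reference='my frontend cluster',
      partition_parameter_kw={
          # a human must then sign the CSRs of new frontend nodes manually
          "automatic-internal-kedifa-caucase-csr":"false",
          "automatic-internal-backend-client-caucase-csr":"false",
      }
  )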
How the automatic signing works
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Having in mind such structure:
* instance with caucase: ``caucase-instance``
* N instances which want to get their CSR signed: ``csr-instance``
In ``caucase-instance`` a caucase user is created by automatically signing one user certificate, which allows signing service certificates.
The ``csr-instance`` creates a CSR, extracts the ID of the CSR, exposes it via HTTP and asks caucase on ``caucase-instance`` to sign it. The ``caucase-instance`` checks that the exposed CSR id matches the one sent to caucase and signs it using the created user.
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/buildout.hash.cfg
# THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginnings, copied verbatim
# - lines containing an "=" sign which must fit in one of the following categories.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template]
filename = instance.cfg.in
md5sum = 051ae51b86f9aba169a6777fa2239901
[profile-common]
filename = instance-common.cfg.in
md5sum = 5784bea3bd608913769ff9a8afcccb68
[profile-caddy-frontend]
filename = instance-apache-frontend.cfg.in
md5sum = 02ce5d44d49982fda598e3086cfbca99
[profile-caddy-replicate]
filename = instance-apache-replicate.cfg.in
md5sum = 57388e76c7e61b3d7213df8aac0b407d
[profile-slave-list]
_update_hash_filename_ = templates/apache-custom-slave-list.cfg.in
md5sum = 964a7f673f441f3a3e90c88ab03e3351
[profile-replicate-publish-slave-information]
_update_hash_filename_ = templates/replicate-publish-slave-information.cfg.in
md5sum = be54431846fe7f3cee65260eefc83d62
[profile-caddy-frontend-configuration]
_update_hash_filename_ = templates/Caddyfile.in
md5sum = fdf46b1dee6ea6b91b9aa9e322a0530d
[template-not-found-html]
_update_hash_filename_ = templates/notfound.html
md5sum = 88af61e7abbf30dc99a1a2526161128d
[template-default-slave-virtualhost]
_update_hash_filename_ = templates/default-virtualhost.conf.in
md5sum = 57c86795293b11300a036f5f8cf2c868
[template-backend-haproxy-configuration]
_update_hash_filename_ = templates/backend-haproxy.cfg.in
md5sum = 6d4ad68ac44ccc72fe9148bd8e05a6f0
[template-empty]
_update_hash_filename_ = templates/empty.in
md5sum = 7155b18edfe128825b8d1f48071454a6
[template-wrapper]
_update_hash_filename_ = templates/wrapper.in
md5sum = 975177dedf677d24e14cede5d13187ce
[template-trafficserver-records-config]
_update_hash_filename_ = templates/trafficserver/records.config.jinja2
md5sum = 715baa302d562a7e4eddc3d1bf72f981
[template-trafficserver-storage-config]
_update_hash_filename_ = templates/trafficserver/storage.config.jinja2
md5sum = d022455a8610bac2dd51101edb035987
[template-trafficserver-logging-yaml]
_update_hash_filename_ = templates/trafficserver/logging.yaml.jinja2
md5sum = 368b271215a92594ca9e2fa3102d484f
[template-caddy-lazy-script-call]
_update_hash_filename_ = templates/apache-lazy-script-call.sh.in
md5sum = 77d60840591de67b64ab3572e46273a0
[template-graceful-script]
_update_hash_filename_ = templates/graceful-script.sh.in
md5sum = 061cc244558fd3af2b6bacf17cae5555
[template-validate-script]
_update_hash_filename_ = templates/validate-script.sh.in
md5sum = 53e5d7ba2827bff003051f74f24ffe4f
[template-configuration-state-script]
_update_hash_filename_ = templates/configuration-state-script.sh.in
md5sum = 4d2537d2698d32a7e909989f8778d144
[template-rotate-script]
_update_hash_filename_ = templates/rotate-script.sh.in
md5sum = 8c150e1e6c993708d31936742f3a7302
[caddyprofiledeps-setup]
filename = setup.py
md5sum = f6f72d03af7d9dc29fb4d4fef1062e73
[caddyprofiledeps-dummy]
filename = caddyprofiledummy.py
md5sum = b41b8de115ad815d0b0db306ad650365
[profile-kedifa]
filename = instance-kedifa.cfg.in
md5sum = b5426129668f39ace55f14012c4a2fd2
[template-backend-haproxy-rsyslogd-conf]
_update_hash_filename_ = templates/backend-haproxy-rsyslogd.conf.in
md5sum = 3336d554661b138dcef97b1d1866803c
[template-slave-introspection-httpd-nginx]
_update_hash_filename_ = templates/slave-introspection-httpd-nginx.conf.in
md5sum = 3067e6ba6c6901821d57d2109517d39c
[template-expose-csr-nginx-conf]
_update_hash_filename_ = templates/expose-csr-nginx.conf.in
md5sum = 5620baa8819fcc8340fa6777ee551a1a
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/caddyprofiledummy.py
from __future__ import print_function
import caucase.client
import caucase.utils
import os
import ssl
import sys
import urllib
import urlparse
from cryptography import x509
from cryptography.hazmat.primitives import serialization
class Recipe(object):
def __init__(self, *args, **kwargs):
pass
def install(self):
return []
def update(self):
return self.install()
def validate_netloc(netloc):
# a bit crazy way to validate that the passed parameter is haproxy
# compatible server netloc
parsed = urlparse.urlparse('scheme://'+netloc)
if ':' in parsed.hostname:
hostname = '[%s]' % parsed.hostname
else:
hostname = parsed.hostname
return netloc == '%s:%s' % (hostname, parsed.port)
def _check_certificate(url, certificate):
parsed = urlparse.urlparse(url)
got_certificate = ssl.get_server_certificate((parsed.hostname, parsed.port))
if certificate.strip() != got_certificate.strip():
raise ValueError('Certificate for %s does not match expected one' % (url,))
def _get_exposed_csr(url, certificate):
_check_certificate(url, certificate)
self_signed = ssl.create_default_context()
self_signed.check_hostname = False
self_signed.verify_mode = ssl.CERT_NONE
return urllib.urlopen(url, context=self_signed).read()
def _get_caucase_client(ca_url, ca_crt, user_key):
return caucase.client.CaucaseClient(
ca_url=ca_url + '/cas',
ca_crt_pem_list=caucase.utils.getCertList(ca_crt),
user_key=user_key,
)
def _get_caucase_csr_list(ca_url, ca_crt, user_key):
csr_list = []
for entry in _get_caucase_client(
ca_url, ca_crt, user_key).getPendingCertificateRequestList():
csr = caucase.utils.load_certificate_request(
caucase.utils.toBytes(entry['csr']))
csr_list.append({
'csr_id': entry['id'],
'csr': csr.public_bytes(serialization.Encoding.PEM).decode()
})
return csr_list
def _csr_match(*csr_list):
number_list = set([])
for csr in csr_list:
number_list.add(
x509.load_pem_x509_csr(str(csr)).public_key().public_numbers())
return len(number_list) == 1
def _sign_csr(ca_url, ca_crt, user_key, csr, csr_list):
signed = False
client = _get_caucase_client(ca_url, ca_crt, user_key)
for csr_entry in csr_list:
if _csr_match(csr, csr_entry['csr']):
client.createCertificate(int(csr_entry['csr_id']))
print('Signed csr with id %s' % (csr_entry['csr_id'],))
signed = True
break
return signed
def _mark_done(filename):
with open(filename, 'w') as fh:
fh.write('done')
print('Marked file %s' % (filename,))
def _is_done(filename):
if os.path.exists(filename):
return True
return False
def smart_sign():
ca_url, ca_crt, done_file, user_key, csr_url, \
csr_url_certificate = sys.argv[1:]
if _is_done(done_file):
return
exposed_csr = _get_exposed_csr(csr_url, csr_url_certificate)
caucase_csr_list = _get_caucase_csr_list(ca_url, ca_crt, user_key)
if _sign_csr(
ca_url, ca_crt, user_key, exposed_csr, caucase_csr_list):
_mark_done(done_file)
else:
print('Failed to sign %s' % (csr_url,))
def caucase_csr_sign_check():
ca_url, ca_crt, user_key = sys.argv[1:]
if len(_get_caucase_csr_list(ca_url, ca_crt, user_key)) != 0:
print('ERR There are CSR to sign on %s' % (ca_url,))
sys.exit(1)
else:
print('OK No CSR to sign on %s' % (ca_url,))
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/instance-apache-frontend.cfg.in
{% import "caucase" as caucase with context %}
{%- set TRUE_VALUES = ['y', 'yes', '1', 'true'] -%}
[buildout]
extends =
{{ software_parameter_dict['profile_common'] }}
{{ software_parameter_dict['profile_monitor'] }}
{{ software_parameter_dict['profile_logrotate_base'] }}
parts =
directory
logrotate-entry-caddy
caddy-frontend
caddyprofiledeps
switch-caddy-softwaretype
caucase-updater
caucase-updater-promise
backend-client-caucase-updater
backend-client-caucase-updater-promise
frontend-caddy-graceful
port-redirection
promise-frontend-caddy-configuration
promise-caddy-frontend-v4-https
promise-caddy-frontend-v4-http
promise-caddy-frontend-v6-https
promise-caddy-frontend-v6-http
promise-logrotate-setup
trafficserver-launcher
trafficserver-reload
trafficserver-configuration-directory
trafficserver-records-config
trafficserver-remap-config
trafficserver-plugin-config
trafficserver-storage-config
trafficserver-ip-allow-config
trafficserver-logging-yaml
trafficserver-promise-listen-port
trafficserver-promise-cache-availability
cron-entry-logrotate-trafficserver
## Monitor for Caddy
monitor-base
monitor-ats-cache-stats-wrapper
monitor-traffic-summary-last-stats-wrapper
monitor-caddy-server-status-wrapper
monitor-verify-re6st-connectivity
backend-haproxy-rsyslogd-configuration
backend-haproxy-rsyslogd
logrotate-entry-backend-haproxy
backend-haproxy
backend-haproxy-graceful
promise-backend-haproxy-http
promise-backend-haproxy-https
promise-backend-haproxy-configuration
slave-introspection-frontend
slave-introspection-graceful
promise-slave-introspection-https
promise-slave-introspection-configuration
logrotate-entry-slave-introspection
backend-haproxy-statistic-frontend-promise
[caddyprofiledeps]
recipe = caddyprofiledeps
[frontend-node-id]
# Store id file in top of hierarchy, so it does not depend on directory creation
file = ${buildout:directory}/.frontend-node-id.txt
recipe = slapos.recipe.build
init =
import os
import secrets
if not os.path.exists(options['file']):
with open(options['file'], 'w') as fh:
fh.write(secrets.token_urlsafe(4))
with open(options['file'], 'r') as fh:
options['value'] = fh.read()
[frontend-node-private-salt]
# Private, not communicated, stable hash, which can be used to salt other
# hashes, so their values are connected to the node, but practically impossible
# to crack (until the node is hacked itself, but then those values are
# stolen anyway)
recipe = slapos.recipe.build
init =
import os
import uuid
if not os.path.exists(options['file']):
with open(options['file'], 'w') as fh:
fh.write(uuid.uuid4().hex)
with open(options['file'], 'r') as fh:
options['value'] = fh.read()
file = ${buildout:directory}/.frontend-node-private-salt.txt
[version-hash]
recipe = slapos.recipe.build
software-release-url = ${slap-connection:software-release-url}
hash-salt = ${frontend-node-private-salt:value}
init =
import hashlib
import base64
options['value'] = base64.urlsafe_b64encode(hashlib.md5(''.join([options['software-release-url'].strip(), options['hash-salt']])).digest())
[frontend-node-information]
recipe = slapos.recipe.build
file = ${buildout:directory}/.frontend-node-information.json
node-id = ${frontend-node-id:value}
current-hash = ${version-hash:value}
current-software-release-url = ${version-hash:software-release-url}
init =
  import json
  changed = False
  try:
    with open(options['file'], 'r') as fh:
      data = json.load(fh)
  except Exception:
    changed = True
    data = {
      'node-id': options['node-id'],
      'version-hash-history': {options['current-hash']: options['current-software-release-url']}
    }
  if 'node-id' not in data:
    data['node-id'] = options['node-id']
    changed = True
  if 'version-hash-history' not in data:
    data['version-hash-history'] = {}
    changed = True
  if options['current-hash'] not in data['version-hash-history']:
    data['version-hash-history'][options['current-hash']] = options['current-software-release-url']
    changed = True
  if changed:
    with open(options['file'], 'w') as fh:
      json.dump(data, fh)
  options['value'] = data
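# Example (hypothetical values) of the resulting .frontend-node-information.json:
#   {"node-id": "q4x1Qw",
#    "version-hash-history": {"<current-hash>": "<current-software-release-url>"}}
# The history is append-only: a new hash/URL pair is recorded on each software
# release change and existing entries are never removed.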
# Create all needed directories
[directory]
recipe = slapos.cookbook:mkdirectory
bin = ${buildout:directory}/bin/
etc = ${buildout:directory}/etc/
srv = ${buildout:directory}/srv/
var = ${buildout:directory}/var/
tmp = ${:var}/tmp
template = ${buildout:directory}/template/
backup = ${:srv}/backup
log = ${:var}/log
run = ${:var}/run
backend-haproxy-rsyslogd-spool = ${:run}/backend-haproxy-rsyslogd-spool
service = ${:etc}/service
etc-run = ${:etc}/run
ca-dir = ${:srv}/ssl
backend-client-dir = ${:srv}/backend-client
# BBB: SlapOS Master non-zero knowledge BEGIN
bbb-ssl-dir = ${:srv}/bbb-ssl
# BBB: SlapOS Master non-zero knowledge END
frontend_cluster = ${:var}/frontend_cluster
# CSR publication
expose-csr = ${:srv}/expose-csr
expose-csr-etc = ${:etc}/expose-csr
expose-csr-var = ${:var}/expose-csr
# slave introspection
slave-introspection-var = ${:var}/slave-introspection
[switch-caddy-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = dynamic-custom-personal-profile-slave-list:output
RootSoftwareInstance = ${:default}
single-default = dynamic-custom-personal-profile-slave-list:output
single-custom-personal = dynamic-custom-personal-profile-slave-list:output
[frontend-configuration]
ip-access-certificate = ${self-signed-ip-access:certificate}
caddy-ipv6 = {{ instance_parameter_dict['ipv6-random'] }}
caddy-https-port = ${configuration:port}
slave-introspection-configuration = ${directory:etc}/slave-introspection-httpd-nginx.conf
slave-introspection-https-port = ${configuration:slave-introspection-https-port}
slave-introspection-secure_access = ${slave-introspection-frontend:connection-secure_access}
slave-introspection-domain = ${slave-introspection-frontend:connection-domain}
[self-signed-ip-access]
# Self-signed certificate for HTTPS IP access to the frontend
recipe = plone.recipe.command
update-command = ${:command}
ipv6 = ${slap-configuration:ipv6-random}
ipv4 = {{instance_parameter_dict['ipv4-random']}}
certificate = ${caddy-directory:master-autocert-dir}/ip-access-${:ipv6}-${:ipv4}.crt
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
command =
[ -f ${:certificate} ] && exit 0
rm -f ${:certificate}
/bin/bash -c ' \
{{ software_parameter_dict['openssl'] }} req \
-new -newkey rsa:2048 -sha256 \
-nodes -x509 -days 36500 \
-keyout ${:certificate} \
-subj "/CN=Self Signed IP Access" \
-reqexts SAN \
-extensions SAN \
-config <(cat {{ software_parameter_dict['openssl_cnf'] }} \
<(printf "\n[SAN]\nsubjectAltName=IP:${:ipv6},IP:${:ipv4}")) \
-out ${:certificate}'
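# To inspect the generated certificate by hand (sketch, using the path above):
#   openssl x509 -in <certificate> -noout -text
# should list both addresses under "X509v3 Subject Alternative Name".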
[self-signed-fallback-access]
# Self-signed fallback certificate for HTTPS access to the frontend
recipe = plone.recipe.command
update-command = ${:command}
ipv6 = ${slap-configuration:ipv6-random}
ipv4 = {{instance_parameter_dict['ipv4-random']}}
certificate = ${caddy-directory:master-autocert-dir}/fallback-access.crt
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
command =
[ -f ${:certificate} ] && exit 0
rm -f ${:certificate}
/bin/bash -c ' \
{{ software_parameter_dict['openssl'] }} req \
-new -newkey rsa:2048 -sha256 \
-nodes -x509 -days 36500 \
-keyout ${:certificate} \
-subj "/CN=Fallback certificate/OU={{ instance_parameter_dict['configuration.frontend-name'] }}" \
-out ${:certificate}'
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/${:filename}
extensions = jinja2.ext.do
extra-context =
slapparameter_dict = {{ dumps(slapparameter_dict) }}
slap_software_type = {{ dumps(instance_parameter_dict['slap-software-type']) }}
context =
import json_module json
raw profile_common {{ software_parameter_dict['profile_common'] }}
raw profile_logrotate_base {{ software_parameter_dict['profile_logrotate_base'] }}
raw profile_monitor {{ software_parameter_dict['profile_monitor'] }}
key slap_software_type :slap_software_type
key slapparameter_dict :slapparameter_dict
section directory directory
${:extra-context}
[software-release-path]
template-empty = {{ software_parameter_dict['template_empty'] }}
template-default-slave-virtualhost = {{ software_parameter_dict['template_default_slave_virtualhost'] }}
template-backend-haproxy-configuration = {{ software_parameter_dict['template_backend_haproxy_configuration'] }}
template-backend-haproxy-rsyslogd-conf = {{ software_parameter_dict['template_backend_haproxy_rsyslogd_conf'] }}
template-expose-csr-nginx-conf = {{ software_parameter_dict['template_expose_csr_nginx_conf'] }}
[kedifa-login-config]
d = ${directory:ca-dir}
template-csr = ${:d}/kedifa-login-template-csr.pem
key = ${:d}/kedifa-login-certificate.pem
certificate = ${:key}
ca-certificate = ${:d}/kedifa-caucase-ca.pem
cas-ca-certificate = ${:d}/kedifa-cas-caucase-ca.pem
crl = ${:d}/kedifa-login-crl.pem
[kedifa-login-csr]
recipe = plone.recipe.command
organization = {{ slapparameter_dict['cluster-identification'] }}
organizational_unit = {{ instance_parameter_dict['configuration.frontend-name'] }}
command =
{% if slapparameter_dict['kedifa-caucase-url'] %}
if [ ! -f ${:template-csr} ] && [ ! -f ${:key} ] ; then
{{ software_parameter_dict['openssl'] }} req -new -sha256 \
-newkey rsa:2048 -nodes -keyout ${:key} \
-subj "/O=${:organization}/OU=${:organizational_unit}" \
-out ${:template-csr}
fi
{% endif %}
test -f ${:key} && test -f ${:template-csr}
update-command = ${:command}
template-csr = ${kedifa-login-config:template-csr}
key = ${kedifa-login-config:key}
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
{{ caucase.updater(
prefix='caucase-updater',
buildout_bin_directory=software_parameter_dict['bin_directory'],
updater_path='${directory:service}/kedifa-login-certificate-caucase-updater',
url=slapparameter_dict['kedifa-caucase-url'],
data_dir='${directory:srv}/caucase-updater',
crt_path='${kedifa-login-config:certificate}',
ca_path='${kedifa-login-config:ca-certificate}',
crl_path='${kedifa-login-config:crl}',
key_path='${kedifa-login-csr:key}',
template_csr='${kedifa-login-csr:template-csr}'
)}}
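# Note: the caucase.updater() macro call above (and its backend-client twin
# below) expands into the [caucase-updater]* and [backend-client-caucase-updater]*
# sections listed in the part list at the top of this profile; they keep the
# login certificates signed and the CA/CRL files refreshed from the given
# caucase URLs.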
[kedifa-configuration]
caucase-url = {{ slapparameter_dict['kedifa-caucase-url'] }}
ca-certificate = ${kedifa-login-config:ca-certificate}
certificate = ${kedifa-login-config:certificate}
cas-ca-certificate = ${kedifa-login-config:cas-ca-certificate}
csr = ${caucase-updater-csr:csr}
crl = ${kedifa-login-config:crl}
kedifa-updater-mapping-file = ${directory:etc}/kedifa_updater_mapping.txt
kedifa-updater-state-file = ${directory:srv}/kedifa_updater_state.json
slave_kedifa_information = {{ dumps(slapparameter_dict['slave-kedifa-information']) }}
[backend-client-login-config]
d = ${directory:backend-client-dir}
template-csr = ${:d}/csr.pem
key = ${:d}/certificate.pem
certificate = ${:key}
ca-certificate = ${:d}/ca.pem
cas-ca-certificate = ${:d}/cas-ca.pem
crl = ${:d}/crl.pem
[backend-client-login-csr]
recipe = plone.recipe.command
organization = {{ slapparameter_dict['cluster-identification'] }}
organizational_unit = {{ instance_parameter_dict['configuration.frontend-name'] }}
command =
{% if slapparameter_dict['backend-client-caucase-url'] %}
if [ ! -f ${:template-csr} ] && [ ! -f ${:key} ] ; then
{{ software_parameter_dict['openssl'] }} req -new -sha256 \
-newkey rsa:2048 -nodes -keyout ${:key} \
-subj "/O=${:organization}/OU=${:organizational_unit}" \
-out ${:template-csr}
fi
{% endif %}
test -f ${:key} && test -f ${:template-csr}
update-command = ${:command}
template-csr = ${backend-client-login-config:template-csr}
key = ${backend-client-login-config:key}
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
{{ caucase.updater(
prefix='backend-client-caucase-updater',
buildout_bin_directory=software_parameter_dict['bin_directory'],
updater_path='${directory:service}/backend-client-login-certificate-caucase-updater',
url=slapparameter_dict['backend-client-caucase-url'],
data_dir='${directory:srv}/backend-client-caucase-updater',
crt_path='${backend-client-login-config:certificate}',
ca_path='${backend-client-login-config:ca-certificate}',
crl_path='${backend-client-login-config:crl}',
key_path='${backend-client-login-csr:key}',
template_csr='${backend-client-login-csr:template-csr}'
)}}
[dynamic-custom-personal-profile-slave-list]
< = jinja2-template-base
depends = ${caddyprofiledeps:recipe}
url = {{ software_parameter_dict['profile_slave_list'] }}
filename = custom-personal-instance-slave-list.cfg
master_key_download_url = {{ dumps(slapparameter_dict['master-key-download-url']) }}
software_type = single-custom-personal
organization = {{ slapparameter_dict['cluster-identification'] }}
organizational-unit = {{ instance_parameter_dict['configuration.frontend-name'] }}
backend-client-caucase-url = {{ slapparameter_dict['backend-client-caucase-url'] }}
partition_ipv6 = ${slap-configuration:ipv6-random}
extra-context =
key caddy_configuration_directory caddy-directory:slave-configuration
key backend_client_caucase_url :backend-client-caucase-url
import urlparse_module urlparse
import furl_module furl
import urllib_module urllib
key master_key_download_url :master_key_download_url
key autocert caddy-directory:autocert
key caddy_log_directory caddy-directory:slave-log
key expose_csr_organization :organization
key expose_csr_organizational_unit :organizational-unit
key global_ipv6 slap-configuration:ipv6-random
key empty_template software-release-path:template-empty
key template_default_slave_configuration software-release-path:template-default-slave-virtualhost
key template_expose_csr_nginx_conf software-release-path:template-expose-csr-nginx-conf
key software_type :software_type
key frontend_lazy_graceful_reload frontend-caddy-lazy-graceful:output
key monitor_base_url monitor-instance-parameter:monitor-base-url
key node_id frontend-node-id:value
key version_hash version-hash:value
key software_release_url version-hash:software-release-url
key node_information frontend-node-information:value
key custom_ssl_directory caddy-directory:custom-ssl-directory
# BBB: SlapOS Master non-zero knowledge BEGIN
key apache_certificate apache-certificate:output
# BBB: SlapOS Master non-zero knowledge END
## backend haproxy
key template_backend_haproxy_configuration software-release-path:template-backend-haproxy-configuration
## Configuration passed by section
section configuration configuration
section backend_haproxy_configuration backend-haproxy-configuration
section instance_parameter_dict instance-parameter-section
section frontend_configuration frontend-configuration
section caddy_configuration caddy-configuration
section kedifa_configuration kedifa-configuration
section software_parameter_dict software-parameter-section
# Deploy Caddy Frontend with Jinja power
[dynamic-caddy-frontend-template]
< = jinja2-template-base
url = {{ software_parameter_dict['template_caddy_frontend_configuration'] }}
output = ${caddy-configuration:frontend-configuration}
local_ipv4 = {{ dumps(instance_parameter_dict['ipv4-random']) }}
extra-context =
key instance_home buildout:directory
key master_certificate caddy-configuration:master-certificate
key access_log caddy-configuration:access-log
key slave_configuration_directory caddy-directory:slave-configuration
section frontend_configuration frontend-configuration
key http_port configuration:plain_http_port
key https_port configuration:port
key global_ipv6 slap-configuration:ipv6-random
key local_ipv4 :local_ipv4
key error_log caddy-configuration:error-log
key not_found_file caddy-configuration:not-found-file
key username monitor-instance-parameter:username
key password monitor-htpasswd:passwd
# BBB: SlapOS Master non-zero knowledge BEGIN
key apache_certificate apache-certificate:output
# BBB: SlapOS Master non-zero knowledge END
[caddy-wrapper]
recipe = slapos.recipe.template:jinja2
inline =
#!/bin/sh
export CADDYPATH=${directory:frontend_cluster}
ulimit -n $(ulimit -Hn)
exec {{ software_parameter_dict['caddy'] }} \
-conf ${dynamic-caddy-frontend-template:output} \
-log ${caddy-configuration:error-log} \
-log-roll-mb 0 \
{% if instance_parameter_dict['configuration.global-disable-http2'].lower() in TRUE_VALUES %}-http2=false \{% else %}-http2=true \{% endif %}
-grace {{ instance_parameter_dict['configuration.mpm-graceful-shutdown-timeout'] }}s \
-disable-http-challenge \
-disable-tls-alpn-challenge \
"$@"
output = ${directory:bin}/caddy-wrapper
[caddy-frontend]
recipe = slapos.cookbook:wrapper
command-line = ${caddy-wrapper:output} -pidfile ${caddy-configuration:pid-file}
wrapper-path = ${directory:service}/frontend_caddy
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
hash-files = ${caddy-wrapper:output}
[not-found-html]
recipe = plone.recipe.command
update-command = ${:command}
filename = notfound.html
command = ln -sf {{ software_parameter_dict['template_not_found_html'] }} ${caddy-directory:document-root}/${:filename}
[caddy-directory]
recipe = slapos.cookbook:mkdirectory
document-root = ${directory:srv}/htdocs
slave-configuration = ${directory:etc}/caddy-slave-conf.d/
slave-log = ${directory:log}/httpd
autocert = ${directory:srv}/autocert
master-autocert-dir = ${:autocert}/master-autocert
custom-ssl-directory = ${:slave-configuration}/ssl
[caddy-configuration]
frontend-configuration = ${directory:etc}/Caddyfile
access-log = ${directory:log}/frontend-access.log
error-log = ${directory:log}/frontend-error.log
pid-file = ${directory:run}/httpd.pid
frontend-graceful-command = ${frontend-caddy-validate:output} && kill -USR1 $(cat ${:pid-file})
not-found-file = ${caddy-directory:document-root}/${not-found-html:filename}
master-certificate = ${caddy-directory:master-autocert-dir}/master.pem
# Communication with ATS
cache-port = ${trafficserver-variable:input-port}
# slave introspection
slave-introspection-access-log = ${directory:log}/slave-introspection-access.log
slave-introspection-error-log = ${directory:log}/slave-introspection-error.log
slave-introspection-pid-file = ${directory:run}/slave-introspection.pid
slave-introspection-graceful-command = ${slave-introspection-validate:output} && kill -HUP $(cat ${:slave-introspection-pid-file})
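# Signal semantics for the graceful commands above: the Caddy frontend reloads
# its configuration on USR1, while slave introspection is served by nginx
# (see [slave-introspection-validate]), which reloads on HUP.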
# BBB: SlapOS Master non-zero knowledge BEGIN
[get-self-signed-fallback-access]
recipe = collective.recipe.shelloutput
commands =
certificate = cat ${self-signed-fallback-access:certificate}
[apache-certificate]
recipe = slapos.recipe.template:jinja2
inline =
{% raw %}
{{ certificate or fallback_certificate }}
{{ key or '' }}
{% endraw %}
context =
key certificate configuration:apache-certificate
key key configuration:apache-key
key fallback_certificate get-self-signed-fallback-access:certificate
output = ${directory:bbb-ssl-dir}/frontend.crt
# BBB: SlapOS Master non-zero knowledge END
[logrotate-entry-caddy]
<= logrotate-entry-base
name = caddy
log = ${caddy-configuration:error-log} ${caddy-configuration:access-log}
rotate-num = ${configuration:rotate-num}
# Note: Slaves do not define their own reload, as it would be repeated for
#       each of them: sharedscripts works per logrotate entry, and each
#       slave needs its own olddir.
#       We trust that there will always be something to rotate in the error
#       or access log, which will trigger the postrotate script.
post = ${frontend-caddy-lazy-graceful:output} &
delaycompress =
#################
# Trafficserver
#################
[trafficserver-directory]
recipe = slapos.cookbook:mkdirectory
configuration = ${directory:etc}/trafficserver
local-state = ${directory:var}/trafficserver
bin_path = {{ software_parameter_dict['trafficserver'] }}/bin
log = ${directory:log}/trafficserver
cache-path = ${directory:srv}/ats_cache
logrotate-backup = ${logrotate-directory:logrotate-backup}/trafficserver
[trafficserver-variable]
wrapper-path = ${directory:service}/trafficserver
reload-path = ${directory:etc-run}/trafficserver-reload
local-ip = {{ instance_parameter_dict['ipv4-random'] }}
input-port = 23432
hostname = ${configuration:frontend-name}
plugin-config =
ip-allow-config = src_ip=0.0.0.0-255.255.255.255 action=ip_allow
cache-path = ${trafficserver-directory:cache-path}
disk-cache-size = ${configuration:disk-cache-size}
ram-cache-size = ${configuration:ram-cache-size}
templates-dir = {{ software_parameter_dict['trafficserver'] }}/etc/trafficserver/body_factory
request-timeout = ${configuration:request-timeout}
version-hash = ${version-hash:value}
node-id = ${frontend-node-id:value}
[trafficserver-configuration-directory]
recipe = plone.recipe.command
command = cp -rn {{ software_parameter_dict['trafficserver'] }}/etc/trafficserver/* ${:target}
target = ${trafficserver-directory:configuration}
[trafficserver-launcher]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['trafficserver'] }}/bin/traffic_manager
wrapper-path = ${trafficserver-variable:wrapper-path}
environment =
TS_ROOT=${buildout:directory}
PROXY_CONFIG_CONFIG_DIR=${trafficserver-directory:configuration}
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
[trafficserver-reload]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['trafficserver'] }}/bin/traffic_ctl config reload
wrapper-path = ${trafficserver-variable:reload-path}
environment = TS_ROOT=${buildout:directory}
# XXX Dedicated Jinja Section without slapparameter
[trafficserver-jinja2-template-base]
recipe = slapos.recipe.template:jinja2
output = ${trafficserver-directory:configuration}/${:filename}
extra-context =
context =
section ats_directory trafficserver-directory
section ats_configuration trafficserver-variable
${:extra-context}
[trafficserver-records-config]
< = trafficserver-jinja2-template-base
url = {{ software_parameter_dict['template_trafficserver_records_config'] }}
filename = records.config
extra-context =
import os_module os
[trafficserver-storage-config]
< = trafficserver-jinja2-template-base
url = {{ software_parameter_dict['template_trafficserver_storage_config'] }}
filename = storage.config
[trafficserver-logging-yaml]
< = trafficserver-jinja2-template-base
url = {{ software_parameter_dict['template_trafficserver_logging_yaml'] }}
filename = logging.yaml
[trafficserver-remap-config]
<= trafficserver-jinja2-template-base
{%- raw %}
inline =
map /HTTPS/ http://{{ ipv4 }}:{{ https_port }}
map /HTTP/ http://{{ ipv4 }}:{{ http_port }}
{%- endraw %}
extra-context =
raw ipv4 {{ instance_parameter_dict['ipv4-random'] }}
key https_port backend-haproxy-configuration:https-port
key http_port backend-haproxy-configuration:http-port
filename = remap.config
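# Rendered example (hypothetical IPv4 and ports):
#   map /HTTPS/ http://10.0.0.2:4443
#   map /HTTP/ http://10.0.0.2:8080
# i.e. requests reaching ATS are remapped to the backend haproxy HTTPS/HTTP
# ports configured above.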
[trafficserver-plugin-config]
< = trafficserver-jinja2-template-base
url = {{ software_parameter_dict['template_empty'] }}
filename = plugin.config
context =
key content trafficserver-variable:plugin-config
[trafficserver-ip-allow-config]
< = trafficserver-jinja2-template-base
url = {{ software_parameter_dict['template_empty'] }}
filename = ip_allow.config
context =
key content trafficserver-variable:ip-allow-config
[trafficserver-promise-listen-port]
<= monitor-promise-base
promise = check_socket_listening
name = trafficserver-port-listening.py
config-host = ${trafficserver-variable:local-ip}
config-port = ${trafficserver-variable:input-port}
[trafficserver-ctl]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['trafficserver'] }}/bin/traffic_ctl
wrapper-path = ${directory:bin}/traffic_ctl
environment = TS_ROOT=${buildout:directory}
[trafficserver-promise-cache-availability]
<= monitor-promise-base
promise = trafficserver_cache_availability
name = trafficserver-cache-availability.py
config-wrapper-path = ${trafficserver-ctl:wrapper-path}
[trafficserver-rotate-script]
< = jinja2-template-base
url = {{ software_parameter_dict['template_rotate_script'] }}
output = ${directory:bin}/trafficserver-rotate
xz_binary = {{ software_parameter_dict['xz_location'] ~ '/bin/xz' }}
pattern = *.old
# days to keep log files
keep_days = 365
extra-context =
key log_dir trafficserver-directory:log
key rotate_dir trafficserver-directory:logrotate-backup
key xz_binary :xz_binary
key keep_days :keep_days
key pattern :pattern
[cron-entry-logrotate-trafficserver]
recipe = slapos.cookbook:cron.d
cron-entries = ${directory:etc}/cron.d
name = trafficserver-logrotate
frequency = 0 0 * * *
command = ${trafficserver-rotate-script:output}
### End of ATS sections
### Caddy Graceful and promises
[frontend-caddy-configuration-state]
< = jinja2-template-base
url = {{ software_parameter_dict['template_configuration_state_script'] }}
output = ${directory:bin}/${:_buildout_section_name_}
path_list = ${caddy-configuration:frontend-configuration} ${caddy-directory:slave-configuration}/*.conf ${caddy-directory:master-autocert-dir}/*.key ${caddy-directory:master-autocert-dir}/*.crt ${caddy-directory:master-autocert-dir}/*.pem ${caddy-directory:autocert}/*.pem ${caddy-directory:custom-ssl-directory}/*.proxy_ca_crt ${directory:bbb-ssl-dir}/*.crt
sha256sum = {{ software_parameter_dict['sha256sum'] }}
extra-context =
key path_list :path_list
key sha256sum :sha256sum
key signature_file :signature_file
[frontend-caddy-configuration-state-graceful]
< = frontend-caddy-configuration-state
signature_file = ${directory:run}/graceful_configuration_state_signature
[frontend-caddy-configuration-state-validate]
< = frontend-caddy-configuration-state
signature_file = ${directory:run}/validate_configuration_state_signature
[frontend-caddy-graceful]
< = jinja2-template-base
url = {{ software_parameter_dict['template_graceful_script'] }}
output = ${directory:etc-run}/frontend-caddy-safe-graceful
extra-context =
key graceful_reload_command caddy-configuration:frontend-graceful-command
key caddy_configuration_state frontend-caddy-configuration-state-graceful:output
[frontend-caddy-validate]
< = jinja2-template-base
url = {{ software_parameter_dict['template_validate_script'] }}
output = ${directory:bin}/frontend-caddy-validate
last_state_file = ${directory:run}/caddy_configuration_last_state
validate_command = ${caddy-wrapper:output} -validate
extra-context =
key validate_command :validate_command
key configuration_state_command frontend-caddy-configuration-state-validate:output
key last_state_file :last_state_file
[frontend-caddy-lazy-graceful]
< = jinja2-template-base
url = {{ software_parameter_dict['template_caddy_lazy_script_call'] }}
output = ${directory:bin}/frontend-caddy-lazy-graceful
pid-file = ${directory:run}/lazy-graceful.pid
wait_time = 60
extra-context =
key pid_file :pid-file
key wait_time :wait_time
key lazy_command caddy-configuration:frontend-graceful-command
# Promises checking configuration:
[promise-helper-last-configuration-state]
< = jinja2-template-base
url = {{ software_parameter_dict['template_empty'] }}
output = ${directory:bin}/frontend-read-last-configuration-state
content =
#!/bin/sh
exit $(cat ${frontend-caddy-validate:last_state_file})
context =
key content :content
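# How the promise uses this helper: the generated script just propagates the
# exit code stored by frontend-caddy-validate in its last_state_file, e.g.
#   ${directory:bin}/frontend-read-last-configuration-state && echo valid
# so the validate_frontend_configuration promise below fails whenever the last
# validation recorded a non-zero state.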
[promise-frontend-caddy-configuration]
<= monitor-promise-base
promise = validate_frontend_configuration
name = frontend-caddy-configuration-promise.py
config-verification-script = ${promise-helper-last-configuration-state:output}
[promise-caddy-frontend-v4-https]
<= monitor-promise-base
promise = check_socket_listening
name = caddy_frontend_ipv4_https.py
config-host = {{ instance_parameter_dict['ipv4-random'] }}
config-port = ${configuration:port}
[promise-caddy-frontend-v4-http]
<= monitor-promise-base
promise = check_socket_listening
name = caddy_frontend_ipv4_http.py
config-host = {{ instance_parameter_dict['ipv4-random'] }}
config-port = ${configuration:plain_http_port}
[promise-caddy-frontend-v6-https]
<= monitor-promise-base
promise = check_socket_listening
name = caddy_frontend_ipv6_https.py
config-host = {{ instance_parameter_dict['ipv6-random'] }}
config-port = ${configuration:port}
[promise-caddy-frontend-v6-http]
<= monitor-promise-base
promise = check_socket_listening
name = caddy_frontend_ipv6_http.py
config-host = {{ instance_parameter_dict['ipv6-random'] }}
config-port = ${configuration:plain_http_port}
[promise-backend-haproxy-http]
<= monitor-promise-base
promise = check_socket_listening
name = backend_haproxy_http.py
config-host = {{ instance_parameter_dict['ipv4-random'] }}
config-port = ${backend-haproxy-configuration:http-port}
[promise-backend-haproxy-https]
<= monitor-promise-base
promise = check_socket_listening
name = backend_haproxy_https.py
config-host = {{ instance_parameter_dict['ipv4-random'] }}
config-port = ${backend-haproxy-configuration:https-port}
[backend-haproxy-configuration]
file = ${directory:etc}/backend-haproxy.cfg
pid-file = ${directory:run}/backend-haproxy.pid
log-socket = ${backend-haproxy-rsyslogd-config:log-socket}
graceful-command = ${backend-haproxy-validate:output} && kill -USR2 $(cat ${:pid-file})
http-port = ${configuration:backend-haproxy-http-port}
https-port = ${configuration:backend-haproxy-https-port}
# Caucase related configuration
caucase-url = {{ slapparameter_dict['backend-client-caucase-url'] }}
ca-certificate = ${backend-client-login-config:ca-certificate}
certificate = ${backend-client-login-config:certificate}
cas-ca-certificate = ${backend-client-login-config:cas-ca-certificate}
csr = ${backend-client-caucase-updater-csr:csr}
crl = ${backend-client-login-config:crl}
# The statistics page
statistic-certificate = ${self-signed-ip-access:certificate}
statistic-port = ${configuration:backend-haproxy-statistic-port}
statistic-username = ${monitor-instance-parameter:username}
statistic-password = ${monitor-htpasswd:passwd}
statistic-identification = {{ instance_parameter_dict['configuration.frontend-name'] + ' @ ' + slapparameter_dict['cluster-identification'] }}
statistic-frontend-secure_access = ${backend-haproxy-statistic-frontend:connection-secure_access}
[backend-haproxy]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['haproxy_executable'] }} -f ${backend-haproxy-configuration:file}
wrapper-path = ${directory:service}/backend-haproxy
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
[backend-haproxy-rsyslogd-lazy-graceful]
< = jinja2-template-base
url = {{ software_parameter_dict['template_caddy_lazy_script_call'] }}
output = ${directory:bin}/backend-haproxy-rsyslogd-lazy-graceful
pid-file = ${directory:run}/backend-haproxy-rsyslogd-lazy-graceful.pid
wait_time = 60
extra-context =
key pid_file :pid-file
key wait_time :wait_time
key lazy_command backend-haproxy-rsyslogd-config:graceful-command
[logrotate-entry-backend-haproxy]
<= logrotate-entry-base
name = backend-haproxy
log = ${backend-haproxy-rsyslogd-config:log-file}
rotate-num = ${configuration:rotate-num}
# Note: Slaves do not define their own reload, as it would be repeated for
#       each of them: sharedscripts works per logrotate entry, and each
#       slave needs its own olddir.
#       We trust that there will always be something to rotate in the error
#       or access log, which will trigger the postrotate script.
post = ${backend-haproxy-rsyslogd-lazy-graceful:output} &
delaycompress =
[backend-haproxy-configuration-state]
<= jinja2-template-base
url = {{ software_parameter_dict['template_configuration_state_script'] }}
output = ${directory:bin}/${:_buildout_section_name_}
path_list = ${backend-haproxy-configuration:file} ${backend-client-login-config:certificate}
sha256sum = {{ software_parameter_dict['sha256sum'] }}
extra-context =
key path_list :path_list
key sha256sum :sha256sum
key signature_file :signature_file
[backend-haproxy-configuration-state-graceful]
<= backend-haproxy-configuration-state
signature_file = ${directory:run}/backend_haproxy_graceful_configuration_state_signature
[backend-haproxy-configuration-state-validate]
<= backend-haproxy-configuration-state
signature_file = ${directory:run}/backend_haproxy_validate_configuration_state_signature
[backend-haproxy-graceful]
< = jinja2-template-base
url = {{ software_parameter_dict['template_graceful_script'] }}
output = ${directory:etc-run}/backend-haproxy-safe-graceful
extra-context =
key graceful_reload_command backend-haproxy-configuration:graceful-command
key caddy_configuration_state backend-haproxy-configuration-state-graceful:output
[backend-haproxy-validate]
<= jinja2-template-base
url = {{ software_parameter_dict['template_validate_script'] }}
output = ${directory:bin}/backend-haproxy-validate
last_state_file = ${directory:run}/backend_haproxy_configuration_last_state
validate_command = {{ software_parameter_dict['haproxy_executable'] }} -f ${backend-haproxy-configuration:file} -c
extra-context =
key validate_command :validate_command
key configuration_state_command backend-haproxy-configuration-state-validate:output
key last_state_file :last_state_file
[promise-backend-haproxy-configuration]
<= monitor-promise-base
promise = validate_frontend_configuration
name = backend-haproxy-configuration.py
config-verification-script = ${promise-backend-haproxy-configuration-helper:output}
[promise-backend-haproxy-configuration-helper]
< = jinja2-template-base
url = {{ software_parameter_dict['template_empty'] }}
output = ${directory:bin}/backend-haproxy-read-last-configuration-state
content =
#!/bin/sh
exit $(cat ${backend-haproxy-validate:last_state_file})
context =
key content :content
[backend-haproxy-rsyslogd-config]
log-socket = ${directory:run}/bhlog.sck
log-file = ${directory:log}/backend-haproxy.log
pid-file = ${directory:run}/backend-haproxy-rsyslogd.pid
spool-directory = ${directory:backend-haproxy-rsyslogd-spool}
graceful-command = kill -HUP $(cat ${:pid-file})
caddy-log-directory = ${caddy-directory:slave-log}
[backend-haproxy-rsyslogd-configuration]
<= jinja2-template-base
url = ${software-release-path:template-backend-haproxy-rsyslogd-conf}
output = ${directory:etc}/backend-haproxy-rsyslogd.conf
extra-context =
section configuration backend-haproxy-rsyslogd-config
[backend-haproxy-rsyslogd]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['rsyslogd_executable'] }} -i ${backend-haproxy-rsyslogd-config:pid-file} -n -f ${backend-haproxy-rsyslogd-configuration:output}
wrapper-path = ${directory:service}/backend-haproxy-rsyslogd
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
#######
# Monitoring sections
#
[monitor-instance-parameter]
# Note: Workaround for the monitor stack, which uses the monitor-httpd-port
#       parameter directly; in our case it can come from the network, so the
#       !py!'u' prefix has to be stripped
monitor-httpd-port = {{ instance_parameter_dict['configuration.monitor-httpd-port'] | int }}
password = {{ instance_parameter_dict['configuration.monitor-password'] | string }}
[monitor-conf-parameters]
private-path-list +=
${logrotate-directory:logrotate-backup}
[monitor-traffic-summary-last-stats-wrapper]
< = jinja2-template-base
url = {{ software_parameter_dict['template_wrapper'] }}
output = ${directory:bin}/traffic-summary-last-stats_every_1_hour
command = export TS_ROOT=${buildout:directory} && echo "$({{ software_parameter_dict['trafficserver'] }}/bin/traffic_logstats -f ${trafficserver-directory:log}/squid.blog) "
extra-context =
key content monitor-traffic-summary-last-stats-wrapper:command
# Produce ATS Cache stats
[monitor-ats-cache-stats-wrapper]
< = jinja2-template-base
url = {{ software_parameter_dict['template_wrapper'] }}
output = ${directory:bin}/ats-cache-stats_every_1_hour
command = export TS_ROOT=${buildout:directory} && echo "$({{ software_parameter_dict['trafficserver'] }}/bin/traffic_shell ${monitor-ats-cache-stats-config:output}) "
extra-context =
key content monitor-ats-cache-stats-wrapper:command
[monitor-caddy-server-status-wrapper]
< = jinja2-template-base
url = {{ software_parameter_dict['template_wrapper'] }}
output = ${directory:bin}/monitor-caddy-server-status-wrapper
command = {{ software_parameter_dict['curl'] }}/bin/curl -s http://{{ instance_parameter_dict['ipv4-random'] }}:${configuration:plain_http_port}/server-status -u ${monitor-instance-parameter:username}:${monitor-htpasswd:passwd} 2>&1
extra-context =
key content monitor-caddy-server-status-wrapper:command
[monitor-ats-cache-stats-config]
< = jinja2-template-base
url = {{ software_parameter_dict['template_empty'] }}
output = ${trafficserver-configuration-directory:target}/cache-config.stats
context =
raw content show:cache-stats
[monitor-verify-re6st-connectivity]
<= monitor-promise-base
promise = check_url_available
name = re6st-connectivity.py
config-url = ${configuration:re6st-verification-url}
[port-redirection]
<= jinja2-template-base
inline =
[{"srcPort": 80, "destPort": {{ '{{' }} http_port {{ '}}' }}}, {"srcPort": 443, "destPort": {{ '{{' }} https_port {{ '}}' }}}]
output = ${buildout:directory}/.slapos-port-redirect
extra-context =
key http_port configuration:plain_http_port
key https_port configuration:port
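# Note on the escaping above: this profile is itself rendered by Jinja2 when
# the software release is built; writing the braces as string expressions
# defers port substitution to the instance-level jinja2 recipe. With
# hypothetical ports the final file reads:
#   [{"srcPort": 80, "destPort": 8080}, {"srcPort": 443, "destPort": 4443}]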
[slave-introspection-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Slave Introspection Frontend {{ instance_parameter_dict['configuration.frontend-name'] }}
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
slave = true
config-url = https://[${slap-configuration:ipv6-random}]:{{ instance_parameter_dict['configuration.slave-introspection-https-port'] }}/
config-https-only = true
return = domain secure_access
[backend-haproxy-statistic-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Backend Haproxy Statistic Frontend {{ instance_parameter_dict['configuration.frontend-name'] }}
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
slave = true
config-url = https://[${slap-configuration:ipv6-random}]:{{ instance_parameter_dict['configuration.backend-haproxy-statistic-port'] }}/
config-https-only = true
return = domain secure_access
[backend-haproxy-statistic-frontend-promise]
<= monitor-promise-base
promise = check_url_available
name = backend-haproxy-statistic-frontend.py
config-http-code = 401
config-url =
${backend-haproxy-statistic-frontend:connection-secure_access}
[slave-introspection-configuration-state]
<= jinja2-template-base
url = {{ software_parameter_dict['template_configuration_state_script'] }}
output = ${directory:bin}/${:_buildout_section_name_}
path_list = ${frontend-configuration:slave-introspection-configuration} ${frontend-configuration:ip-access-certificate}
sha256sum = {{ software_parameter_dict['sha256sum'] }}
extra-context =
key path_list :path_list
key sha256sum :sha256sum
key signature_file :signature_file
[slave-introspection-configuration-state-graceful]
<= slave-introspection-configuration-state
signature_file = ${directory:run}/slave_introspection_graceful_configuration_state_signature
[slave-introspection-configuration-state-validate]
<= slave-introspection-configuration-state
signature_file = ${directory:run}/slave_introspection_validate_configuration_state_signature
[slave-introspection-graceful]
< = jinja2-template-base
url = {{ software_parameter_dict['template_graceful_script'] }}
output = ${directory:etc-run}/slave-introspection-safe-graceful
extra-context =
key graceful_reload_command caddy-configuration:slave-introspection-graceful-command
key caddy_configuration_state slave-introspection-configuration-state-graceful:output
[slave-introspection-validate]
<= jinja2-template-base
url = {{ software_parameter_dict['template_validate_script'] }}
output = ${directory:bin}/slave-introspection-validate
last_state_file = ${directory:run}/slave_introspection_configuration_last_state
validate_command = {{ software_parameter_dict['nginx'] }} -c ${frontend-configuration:slave-introspection-configuration} -t
extra-context =
key validate_command :validate_command
key configuration_state_command slave-introspection-configuration-state-validate:output
key last_state_file :last_state_file
[promise-slave-introspection-configuration]
<= monitor-promise-base
promise = validate_frontend_configuration
name = slave-introspection-configuration.py
config-verification-script = ${promise-slave-introspection-configuration-helper:output}
[promise-slave-introspection-configuration-helper]
< = jinja2-template-base
url = {{ software_parameter_dict['template_empty'] }}
output = ${directory:bin}/slave-introspection-read-last-configuration-state
content =
#!/bin/sh
exit $(cat ${slave-introspection-validate:last_state_file})
context =
key content :content
[promise-slave-introspection-https]
<= monitor-promise-base
promise = check_socket_listening
name = slave_introspection_https.py
config-host = {{ instance_parameter_dict['ipv6-random'] }}
config-port = ${frontend-configuration:slave-introspection-https-port}
[logrotate-entry-slave-introspection]
<= logrotate-entry-base
name = slave-introspection
log = ${caddy-configuration:slave-introspection-access-log} ${caddy-configuration:slave-introspection-error-log}
rotate-num = ${configuration:rotate-num}
post = kill -USR1 $(cat ${caddy-configuration:slave-introspection-pid-file})
delaycompress =
[promise-logrotate-setup]
<= monitor-promise-base
promise = check_command_execute
name = ${:_buildout_section_name_}.py
config-command =
${logrotate:wrapper-path} -d
[configuration]
{%- for key, value in instance_parameter_dict.iteritems() -%}
{%- if key.startswith('configuration.') %}
{{ key.replace('configuration.', '') }} = {{ dumps(value) }}
{%- endif -%}
{%- endfor %}
[instance-parameter-section]
{#- There are dangerous keys like recipe, etc #}
{#- XXX: Some other approach would be useful #}
{%- set DROP_KEY_LIST = ['recipe', '__buildout_signature__', 'computer', 'partition', 'url', 'key', 'cert'] %}
{%- for key, value in instance_parameter_dict.iteritems() -%}
{%- if not key.startswith('configuration.') and key not in DROP_KEY_LIST %}
{{ key }} = {{ dumps(value) }}
{%- endif -%}
{%- endfor %}
[software-parameter-section]
{%- for key, value in software_parameter_dict.iteritems() %}
{{ key }} = {{ dumps(value) }}
{%- endfor %}
instance-apache-replicate.cfg.in 0000664 0000000 0000000 00000116243 14241130220 0034467 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend {% set aibcc_enabled = True %}
{% import "caucase" as caucase with context %}
{%- set TRUE_VALUES = ['y', 'yes', '1', 'true'] -%}
{%- set GOOD_CIPHER_LIST = ['ECDHE-ECDSA-AES256-GCM-SHA384', 'ECDHE-RSA-AES256-GCM-SHA384', 'ECDHE-ECDSA-AES128-GCM-SHA256', 'ECDHE-RSA-AES128-GCM-SHA256', 'ECDHE-ECDSA-WITH-CHACHA20-POLY1305', 'ECDHE-RSA-WITH-CHACHA20-POLY1305', 'ECDHE-RSA-AES256-CBC-SHA', 'ECDHE-RSA-AES128-CBC-SHA', 'ECDHE-ECDSA-AES256-CBC-SHA', 'ECDHE-ECDSA-AES128-CBC-SHA', 'RSA-AES256-CBC-SHA', 'RSA-AES128-CBC-SHA', 'ECDHE-RSA-3DES-EDE-CBC-SHA', 'RSA-3DES-EDE-CBC-SHA'] %}
{#- Allow to pass only some parameters to frontend nodes #}
{%- set FRONTEND_NODE_PASSED_KEY_LIST = [
'plain_http_port',
'port',
'apache-certificate',
'apache-key',
'domain',
'enable-http2-by-default',
'global-disable-http2',
'mpm-graceful-shutdown-timeout',
're6st-verification-url',
'backend-connect-timeout',
'backend-connect-retries',
'ciphers',
'request-timeout',
'authenticate-to-backend',
]
%}
{#- SlapOS Master (but not slapproxy!) merges a slave's instance and connection parameters, so the slave information passed to nodes has to be limited to instance-related keys only #}
{#- Note: as a result this feature is very hard to test with slapproxy, which does not pollute the slave information; hence this kind of whitelist is implemented #}
{%- set FRONTEND_NODE_SLAVE_PASSED_KEY_LIST_SCHEMA = [
'authenticate-to-backend',
'backend-connect-retries',
'backend-connect-timeout',
'ciphers',
'custom_domain',
'default-path',
'disable-no-cache-request',
'disable-via-header',
'disabled-cookie-list',
'enable-http2',
'enable_cache',
'health-check',
'health-check-authenticate-to-failover-backend',
'health-check-failover-https-url',
'health-check-failover-https-url-netloc-list',
'health-check-failover-ssl-proxy-ca-crt',
'health-check-failover-ssl-proxy-verify',
'health-check-failover-url',
'health-check-failover-url-netloc-list',
'health-check-fall',
'health-check-http-method',
'health-check-http-path',
'health-check-http-version',
'health-check-interval',
'health-check-rise',
'health-check-timeout',
'https-only',
'https-url',
'https-url-netloc-list',
'monitor-ipv4-test',
'monitor-ipv6-test',
'path',
'prefer-gzip-encoding-to-backend',
'request-timeout',
'server-alias',
'ssl-proxy-verify',
'ssl_ca_crt',
'ssl_crt',
'ssl_key',
'ssl_proxy_ca_crt',
'strict-transport-security',
'strict-transport-security-preload',
'strict-transport-security-sub-domains',
'type',
'url',
'url-netloc-list',
'virtualhostroot-http-port',
'virtualhostroot-https-port',
'websocket-path-list',
'websocket-transparent',
]
%}
{%- set FRONTEND_NODE_SLAVE_PASSED_KEY_LIST_INTERNAL = [
'slave_reference',
]
%}
{%- set FRONTEND_NODE_SLAVE_PASSED_KEY_LIST = FRONTEND_NODE_SLAVE_PASSED_KEY_LIST_SCHEMA + FRONTEND_NODE_SLAVE_PASSED_KEY_LIST_INTERNAL %}
{% set aikc_enabled = slapparameter_dict.get('automatic-internal-kedifa-caucase-csr', 'true').lower() in TRUE_VALUES %}
{% set aibcc_enabled = slapparameter_dict.get('automatic-internal-backend-client-caucase-csr', 'true').lower() in TRUE_VALUES %}
{# Ports 8401, 8402 and 8410+1..N are reserved for monitor ports on various partitions #}
{% set master_partition_monitor_monitor_httpd_port = 8401 %}
{% set kedifa_partition_monitor_httpd_port = 8402 %}
{% set frontend_monitor_httpd_base_port = 8410 %}
{% set caucase_host = '[' ~ instance_parameter_dict['ipv6-random'] ~ ']' %}
{% set caucase_netloc = caucase_host ~ ':' ~ instance_parameter_dict['configuration.caucase_backend_client_port'] %}
{% set caucase_url = 'http://' ~ caucase_netloc %}
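{#- Example (hypothetical address and port): with ipv6-random = 2001:db8::1 and
    configuration.caucase_backend_client_port = 8990, caucase_url becomes
    http://[2001:db8::1]:8990 #}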
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/${:filename}
extra-context =
context =
import json_module json
raw profile_common {{ software_parameter_dict['profile_common'] }}
${:extra-context}
{% set popen = functools_module.partial(subprocess_module.Popen, stdout=subprocess_module.PIPE, stderr=subprocess_module.STDOUT, stdin=subprocess_module.PIPE) %}
{% set part_list = [] %}
{% set single_type_key = 'single-' %}
{% set frontend_type = "%s%s" % (single_type_key, 'custom-personal') %}
{% set frontend_quantity = slapparameter_dict.pop('-frontend-quantity', '1') | int %}
{% set slave_list_name = 'extra_slave_instance_list' %}
{% set frontend_list = [] %}
{% set frontend_section_list = [] %}
{% set request_dict = {} %}
{% set namebase = 'caddy-frontend' %}
# XXX Dirty hack: it is not possible to define the default value earlier
{% set sla_computer_caddy_1_key = '-sla-1-computer_guid' %}
{% if not sla_computer_caddy_1_key in slapparameter_dict %}
{% do slapparameter_dict.__setitem__(sla_computer_caddy_1_key, '${slap-connection:computer-id}') %}
{% endif %}
{% set sla_computer_kedifa_key = '-sla-kedifa-computer_guid' %}
{% if not sla_computer_kedifa_key in slapparameter_dict %}
{% do slapparameter_dict.__setitem__(sla_computer_kedifa_key, '${slap-connection:computer-id}') %}
{% endif %}
# Here we request each frontend individually.
# The presence of SLA parameters is checked and they are added if found
{% for i in range(1, frontend_quantity + 1) %}
{% set frontend_name = "%s-%s" % (namebase, i) %}
{% set request_section_title = 'request-%s' % frontend_name %}
{% set sla_key = "-sla-%s-" % i %}
{% set sla_key_length = sla_key | length %}
{% set sla_dict = {} %}
{% set config_key = "-frontend-config-%s-" % i %}
{% set config_key_length = config_key | length %}
{% set config_dict = {} %}
{% for key in slapparameter_dict.keys() %}
{% if key.startswith(sla_key) %}
{% do sla_dict.__setitem__(key[sla_key_length:], slapparameter_dict.pop(key)) %}
# Check for frontend-specific configuration
{% elif key.startswith(config_key) %}
{% do config_dict.__setitem__(key[config_key_length:], slapparameter_dict.pop(key)) %}
{% endif %}
{% endfor %}
{% do config_dict.__setitem__('monitor-httpd-port', frontend_monitor_httpd_base_port + i) %}
{% do config_dict.__setitem__('backend-client-caucase-url', caucase_url) %}
{% set state_key = "-frontend-%s-state" % i %}
{% set frontend_state = slapparameter_dict.pop(state_key, None) %}
{% if frontend_state != 'destroyed' %}
{% do frontend_list.append(frontend_name) %}
{% do frontend_section_list.append(request_section_title) %}
{% endif %}
{% do part_list.append(request_section_title) %}
# Fill the request dict for this frontend node
{% set request_content_dict = {
'config': config_dict,
'name': frontend_name,
'sla': sla_dict,
'state': frontend_state
} %}
{% set frontend_software_url_key = "-frontend-%s-software-release-url" % i %}
{% do request_content_dict.__setitem__('software-url', slapparameter_dict.get(frontend_software_url_key) or '${slap-connection:software-release-url}') %}
{% do request_dict.__setitem__(request_section_title, request_content_dict) %}
{% endfor %}
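{#- Illustration (hypothetical values): after the loop above, request_dict maps
    e.g. 'request-caddy-frontend-1' to {'config': {'monitor-httpd-port': 8411,
    'backend-client-caucase-url': caucase_url, ...}, 'name': 'caddy-frontend-1',
    'sla': {...}, 'state': None, 'software-url': ...} #}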
{% set authorized_slave_string_list = [] %}
{% set authorized_slave_list = [] %}
{% set rejected_slave_dict = {} %}
{% set critical_rejected_slave_dict = {} %}
{% set warning_slave_dict = {} %}
{% set used_host_list = [] %}
{% for slave in sorted(instance_parameter_dict['slave-instance-list']) %}
{% set slave_error_list = [] %}
{% set slave_critical_error_list = [] %}
{% set slave_warning_list = [] %}
{% set slave_server_alias_unclashed = [] %}
{% set slave_type = slave.get('type') %}
{% if slave_type not in [None, '', 'default', 'zope', 'redirect', 'notebook', 'websocket'] %}
{% do slave_error_list.append('type:%s is not supported' % (slave_type,)) %}
{% endif %}
{# Check health-check-* #}
{% set health_check = (str(slave.get('health-check', False)) or 'false').lower() %}
{% if health_check in TRUE_VALUES %}
{% set health_check_http_method = slave.get('health-check-http-method') or 'GET' %}
{% if health_check_http_method not in ['GET', 'OPTIONS', 'CONNECT', 'POST'] %}
{% do slave_error_list.append('Wrong health-check-http-method %s' % (health_check_http_method,)) %}
{% endif %}
{% set health_check_http_path = slave.get('health-check-http-path') or '/' %}
{% set health_check_http_version = slave.get('health-check-http-version') or 'HTTP/1.1' %}
{% if health_check_http_version not in ['HTTP/1.1', 'HTTP/1.0'] %}
{% do slave_error_list.append('Wrong health-check-http-version %s' % (health_check_http_version,)) %}
{% endif %}
{% set health_check_timeout = (slave.get('health-check-timeout') or '2') | int(false) %}
{% if health_check_timeout is false or health_check_timeout <= 0 %}
{% do slave_error_list.append('Wrong health-check-timeout %s' % (slave.get('health-check-timeout'),)) %}
{% endif %}
{% set health_check_interval = (slave.get('health-check-interval') or '5') | int(false) %}
{% if health_check_interval is false or health_check_interval <= 0 %}
{% do slave_error_list.append('Wrong health-check-interval %s' % (slave.get('health-check-interval'),)) %}
{% endif %}
{% set health_check_rise = (slave.get('health-check-rise') or '1') | int(false) %}
{% if health_check_rise is false or health_check_rise <= 0 %}
{% do slave_error_list.append('Wrong health-check-rise %s' % (slave.get('health-check-rise'),)) %}
{% endif %}
{% set health_check_fall = (slave.get('health-check-fall') or '1') | int(false) %}
{% if health_check_fall is false or health_check_fall <= 0 %}
{% do slave_error_list.append('Wrong health-check-fall %s' % (slave.get('health-check-fall'),)) %}
{% endif %}
{% endif %}
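{#- Example of a rejection produced by the checks above (hypothetical slave):
    a slave requested with health-check-timeout = '-1' ends up in
    rejected_slave_dict as {'<slave_reference>': ['Wrong health-check-timeout -1']} #}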
{# Check virtualhostroot-http-port and virtualhostroot-https-port #}
{% for key in ['virtualhostroot-http-port', 'virtualhostroot-https-port'] %}
{% set value = (slave.get(key) or '1') | int(false) %}
{% if value is false or value < 0 %}
{% do slave_error_list.append('Wrong %s %r' % (key, slave.get(key))) %}
{% endif %}
{% endfor %}
{# Check ciphers #}
{% set slave_cipher_list = slave.get('ciphers', '').strip().split() %}
{% if slave_cipher_list %}
{% for cipher in slave_cipher_list %}
{% if cipher not in GOOD_CIPHER_LIST %}
{% do slave_error_list.append('Cipher %r is not supported.' % (cipher,)) %}
{% endif %}
{% endfor %}
{% endif %}
{# Check strict-transport-security #}
{% set strict_transport_security = (slave.get('strict-transport-security') or '0') | int(false) %}
{% if strict_transport_security is false or strict_transport_security < 0 %}
{% do slave_error_list.append('Wrong strict-transport-security %s' % (slave.get('strict-transport-security'),)) %}
{% endif %}
{% set custom_domain = slave.get('custom_domain') %}
{% if custom_domain and custom_domain in used_host_list %}
{% set message = 'custom_domain %r clashes' % (custom_domain,) %}
{% do slave_error_list.append(message) %}
{% do slave_critical_error_list.append(message) %}
{% else %}
{% do used_host_list.append(custom_domain) %}
{% endif %}
{% if slave.get('server-alias') %}
{% for slave_alias in ('' ~ slave['server-alias']).split() %}
{% if slave_alias.startswith('*.') %}
{% set clean_slave_alias = slave_alias[2:] %}
{% else %}
{% set clean_slave_alias = slave_alias %}
{% endif %}
{% if not validators.domain(clean_slave_alias) %}
{% do slave_error_list.append('server-alias \'%s\' not valid' % (slave_alias,)) %}
{% else %}
{% if slave_alias in slave_server_alias_unclashed or slave_alias == custom_domain %}
{# optionally do something about reporting back that server-alias has been unclashed #}
{% elif slave_alias in used_host_list %}
{% set message = 'server-alias \'%s\' clashes' % (slave_alias,) %}
{% do slave_error_list.append(message) %}
{% do slave_critical_error_list.append(message) %}
{% else %}
{% do slave_server_alias_unclashed.append(slave_alias) %}
{% do used_host_list.append(slave_alias) %}
{% endif %}
{% endif %}
{% endfor %}
{% do slave.__setitem__('server-alias', ' '.join(slave_server_alias_unclashed)) %}
{% endif %}
{% for url_key in ['url', 'https-url', 'health-check-failover-url', 'health-check-failover-https-url'] %}
{% if url_key in slave %}
{% set url = (slave[url_key] or '').strip() %}
{% if not validators.url(url) %}
{% do slave_error_list.append('slave %s %r invalid' % (url_key, url)) %}
{% elif url != slave[url_key] %}
{% do slave_warning_list.append('slave %s %r has been converted to %r' % (url_key, slave[url_key], url)) %}
{% endif %}
{% endif %}
{% endfor %}
{% for url_key in ['url-netloc-list', 'https-url-netloc-list', 'health-check-failover-url-netloc-list'] %}
{% if url_key in slave %}
{% for netloc in slave[url_key].split() %}
{% if not caddyprofiledummy.validate_netloc(netloc) %}
{% do slave_error_list.append('slave %s %r invalid' % (url_key, netloc)) %}
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}
{% for k in ['ssl_proxy_ca_crt', 'health-check-failover-ssl-proxy-ca-crt'] %}
{% if k in slave %}
{% set crt = slave.get(k, '') %}
{% set check_popen = popen([software_parameter_dict['openssl'], 'x509', '-noout']) %}
{% do check_popen.communicate(crt) %}
{% if check_popen.returncode != 0 %}
{% do slave_error_list.append('%s is invalid' % (k,)) %}
{% endif %}
{% endif %}
{% endfor %}
{# BBB: SlapOS Master non-zero knowledge BEGIN #}
{% for key in ['ssl_key', 'ssl_crt', 'ssl_ca_crt'] %}
{% if key in slave %}
{% do slave_warning_list.append('%s is obsolete, please use key-upload-url' % (key,)) %}
{% endif %}
{% endfor %}
{% if slave.get('ssl_ca_crt') and not (slave.get('ssl_crt') and slave.get('ssl_key')) %}
{% do slave_error_list.append('ssl_ca_crt is present, so ssl_crt and ssl_key are required') %}
{% endif %}
{% if slave.get('ssl_key') and slave.get('ssl_crt') %}
{% set key_popen = popen([software_parameter_dict['openssl'], 'rsa', '-noout', '-modulus']) %}
{% set crt_popen = popen([software_parameter_dict['openssl'], 'x509', '-noout', '-modulus']) %}
{% set key_modulus = key_popen.communicate(slave['ssl_key'])[0] | trim %}
{% set crt_modulus = crt_popen.communicate(slave['ssl_crt'])[0] | trim %}
{% if not key_modulus or key_modulus != crt_modulus %}
{% do slave_error_list.append('slave ssl_key and ssl_crt do not match') %}
{% endif %}
{% endif %}
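{#- The modulus comparison above is the standard way to check that an RSA key
    and certificate belong together; by hand the equivalent check would be:
      openssl rsa -noout -modulus -in ssl.key
      openssl x509 -noout -modulus -in ssl.crt
    with both commands printing the same Modulus= line. #}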
{# BBB: SlapOS Master non-zero knowledge END #}
{% if slave.get('custom_domain') %}
{% set slave_custom_domain = '' ~ slave['custom_domain'] %}
{% if slave_custom_domain.startswith('*.') %}
{% set clean_custom_domain = slave_custom_domain[2:] %}
{% else %}
{% set clean_custom_domain = slave_custom_domain %}
{% endif %}
{% if not validators.domain(clean_custom_domain) %}
{% do slave_error_list.append('custom_domain %r invalid' % (slave['custom_domain'],)) %}
{% endif %}
{% endif %}
{% if len(slave_error_list) == 0 %}
{# Clean up the slave by dropping keys which are not needed and come from the SlapOS Master implementation #}
{# Send only controlled information about the slave to the node #}
{% set authorized_slave = {} %}
{% for key in FRONTEND_NODE_SLAVE_PASSED_KEY_LIST %}
{% if key in slave %}
{% do authorized_slave.__setitem__(key, slave[key]) %}
{% endif %}
{% endfor %}
{% do authorized_slave_list.append(authorized_slave) %}
{% else %}
{% do rejected_slave_dict.__setitem__(slave.get('slave_reference'), sorted(slave_error_list)) %}
{% endif %}
{% if len(slave_critical_error_list) > 0 %}
{% do critical_rejected_slave_dict.__setitem__(slave.get('slave_reference'), sorted(slave_critical_error_list)) %}
{% endif %}
{% if len(slave_warning_list) > 0 %}
{% do warning_slave_dict.__setitem__(slave.get('slave_reference'), sorted(slave_warning_list)) %}
{% endif %}
{% endfor %}
{% do authorized_slave_list.sort() %}
[monitor-instance-parameter]
monitor-httpd-port = {{ master_partition_monitor_monitor_httpd_port }}
[replicate]
<= slap-connection
recipe = slapos.cookbook:requestoptional.serialised
config-monitor-cors-domains = {{ slapparameter_dict.get('monitor-cors-domains', 'monitor.app.officejs.com') }}
config-monitor-username = ${monitor-instance-parameter:username}
config-monitor-password = ${monitor-htpasswd:passwd}
software-type = {{frontend_type}}
return = slave-instance-information-list monitor-base-url backend-client-csr-url kedifa-csr-url csr-certificate backend-haproxy-statistic-url node-information-json
{#- Send only needed parameters to frontend nodes #}
{%- set base_node_configuration_dict = {} %}
{%- for key in FRONTEND_NODE_PASSED_KEY_LIST %}
{%- if key in slapparameter_dict %}
{%- do base_node_configuration_dict.__setitem__(key, slapparameter_dict[key]) %}
{%- endif %}
{%- endfor %}
{% for section, frontend_request in request_dict.iteritems() %}
{% set state = frontend_request.get('state', '') %}
[{{section}}]
<= replicate
name = {{ frontend_request.get('name') }}
software-url = {{ frontend_request['software-url'] }}
{% if state %}
state = {{ state }}
{% endif %}
{# Do not send additional parameters for destroyed nodes #}
{% if state != 'destroyed' %}
config-slave-kedifa-information = ${request-kedifa:connection-slave-kedifa-information}
config-kedifa-caucase-url = ${request-kedifa:connection-caucase-url}
config-backend-client-caucase-url = {{ caucase_url }}
config-master-key-download-url = ${request-kedifa:connection-master-key-download-url}
config-cluster-identification = {{ instance_parameter_dict['root-instance-title'] }}
{% set node_configuration_dict = {} %}
{% do node_configuration_dict.update(frontend_request.get('config')) %}
{# sort_keys are important in order to avoid shuffling parameters on each run #}
{% do node_configuration_dict.__setitem__(slave_list_name, json_module.dumps(authorized_slave_list, sort_keys=True)) %}
{% do node_configuration_dict.__setitem__("frontend-name", frontend_request.get('name')) %}
{%- for config_key, config_value in node_configuration_dict.iteritems() %}
config-{{ config_key }} = {{ dumps(config_value) }}
{% endfor -%}
{%- for config_key, config_value in base_node_configuration_dict.iteritems() %}
config-{{ config_key }} = {{ dumps(config_value) }}
{% endfor -%}
{% if frontend_request.get('sla') %}
{% for parameter, value in frontend_request.get('sla').iteritems() %}
sla-{{ parameter }} = {{ value }}
{% endfor %}
{% endif %}
{% else %}
{# Ignore return for destroyed nodes #}
return =
{% endif %}
{% endfor %}
{% set warning_list = [] %}
{% for key in ['apache-certificate', 'apache-key'] %}
{% if key in slapparameter_dict %}
{% do warning_list.append('%s is obsolete, please use master-key-upload-url' % (key, )) %}
{% endif %}
{% endfor %}
[publish-information]
<= monitor-publish
recipe = slapos.cookbook:publish
domain = {{ slapparameter_dict.get('domain') }}
slave-amount = {{ instance_parameter_dict['slave-instance-list'] | length }}
accepted-slave-amount = {{ authorized_slave_list | length }}
rejected-slave-amount = {{ rejected_slave_dict | length }}
backend-client-caucase-url = {{ caucase_url }}
{# sort_keys are important in order to avoid shuffling parameters on each run #}
rejected-slave-dict = {{ dumps(json_module.dumps(rejected_slave_dict, sort_keys=True)) }}
rejected-slave-promise-url = ${rejected-slave-promise:config-url}
master-key-upload-url = ${request-kedifa:connection-master-key-upload-url}
master-key-generate-auth-url = ${request-kedifa:connection-master-key-generate-auth-url}
kedifa-caucase-url = ${request-kedifa:connection-caucase-url}
{% if len(warning_list) > 0 %}
{# sort_keys are important in order to avoid shuffling parameters on each run #}
warning-list = {{ dumps(json_module.dumps(warning_list, sort_keys=True)) }}
{% endif %}
{% if len(warning_slave_dict) > 0 %}
{# sort_keys are important in order to avoid shuffling parameters on each run #}
warning-slave-dict = {{ dumps(json_module.dumps(warning_slave_dict, sort_keys=True)) }}
{% endif %}
{% if not aikc_enabled or not aibcc_enabled %}
{% for frontend in frontend_list %}
{% set section_part = '${request-' + frontend %}
{{ frontend }}-csr-certificate = {{ section_part }}:connection-csr-certificate}
{% endfor %}
{% endif %}
{% if not aikc_enabled %}
kedifa-csr-url = ${request-kedifa:connection-kedifa-csr-url}
kedifa-csr-certificate = ${request-kedifa:connection-csr-certificate}
{% for frontend in frontend_list %}
{% set section_part = '${request-' + frontend %}
{{ frontend }}-kedifa-csr-url = {{ section_part }}:connection-kedifa-csr-url}
{% endfor %}
{% endif %}
{% for frontend in frontend_list %}
{% set section_part = '${request-' + frontend %}
{{ frontend }}-backend-haproxy-statistic-url = {{ section_part }}:connection-backend-haproxy-statistic-url}
{{ frontend }}-node-information-json = ${frontend-information:{{ frontend }}-node-information-json}
{% endfor %}
{% if not aibcc_enabled %}
{% for frontend in frontend_list %}
{% set section_part = '${request-' + frontend %}
{{ frontend }}-backend-client-csr-url = {{ section_part }}:connection-backend-client-csr-url}
{% endfor %}
{% endif %}
# Generate promises for requested nodes
{% for frontend in frontend_list %}
{% set part_name = 'promise-backend-haproxy-statistic-url-' + frontend %}
{% do part_list.append(part_name) %}
{% set section_part = '${request-' + frontend %}
[{{ part_name }}]
<= monitor-promise-base
promise = check_url_available
name = check-backend-haproxy-statistic-url-{{ frontend }}.py
config-url =
{{ section_part }}:connection-backend-haproxy-statistic-url}
{% endfor %}
#----------------------------
#--
#-- Publish slave information
[publish-slave-information]
recipe = slapos.cookbook:switch-softwaretype
default = dynamic-publish-slave-information:output
RootSoftwareInstance = ${:default}
replicate = dynamic-publish-slave-information:output
custom-personal = dynamic-publish-slave-information:output
custom-group = dynamic-publish-slave-information:output
[request-kedifa]
<= slap-connection
recipe = slapos.cookbook:requestoptional.serialised
config-monitor-cors-domains = {{ slapparameter_dict.get('monitor-cors-domains', 'monitor.app.officejs.com') }}
config-monitor-username = ${monitor-instance-parameter:username}
config-monitor-password = ${monitor-htpasswd:passwd}
config-monitor-httpd-port = {{ kedifa_partition_monitor_httpd_port }}
{% for key in ['kedifa_port', 'caucase_port'] -%}
{%- if key in slapparameter_dict %}
config-{{ key }} = {{ dumps(slapparameter_dict[key]) }}
{%- endif %}
{%- endfor %}
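{# For illustration (profile defaults, not a statement about any real cluster):
   with kedifa_port=7879 and caucase_port=8890 present in the instance
   parameters, the loop above renders roughly as:
     config-kedifa_port = 7879
     config-caucase_port = 8890 #}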
config-slave-list = {{ dumps(authorized_slave_list) }}
config-cluster-identification = {{ instance_parameter_dict['root-instance-title'] }}
{% set software_url_key = "-kedifa-software-release-url" %}
{% if slapparameter_dict.has_key(software_url_key) %}
software-url = {{ slapparameter_dict.pop(software_url_key) }}
{% else %}
software-url = ${slap-connection:software-release-url}
{% endif %}
software-type = kedifa
name = kedifa
return = slave-kedifa-information master-key-generate-auth-url master-key-upload-url master-key-download-url caucase-url kedifa-csr-url csr-certificate monitor-base-url
{% set sla_kedifa_key = "-sla-kedifa-" %}
{% set sla_kedifa_key_length = sla_kedifa_key | length %}
{% for key in slapparameter_dict.keys() %}
{% if key.startswith(sla_kedifa_key) %}
sla-{{ key[sla_kedifa_key_length:] }} = {{ slapparameter_dict.pop(key) }}
{% endif %}
{% endfor %}
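{# For illustration (hypothetical SLA parameter and value): a request parameter
   named "-sla-kedifa-computer_guid" with value "COMP-1234" is popped here and
   re-emitted on the kedifa request as:
     sla-computer_guid = COMP-1234 #}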
[rejected-slave-information]
rejected-slave-dict = {{ dumps(rejected_slave_dict) }}
[warning-slave-information]
warning-slave-dict = {{ dumps(warning_slave_dict) }}
[slave-information]
{% for frontend_section in frontend_section_list %}
{{ frontend_section }} = {{ "${%s:connection-slave-instance-information-list}" % frontend_section }}
{% endfor %}
[active-slave-instance]
{% set active_slave_instance_list = [] %}
{% for slave_instance in instance_parameter_dict['slave-instance-list'] %}
{# Provide a list of slave titles sent by master, in order to filter out already destroyed slaves #}
{# Note: This functionality is not yet covered by tests, please modify with care #}
{% do active_slave_instance_list.append(slave_instance['slave_reference']) %}
{% endfor %}
{# sort_keys are important in order to avoid shuffling parameters on each run #}
active-slave-instance-list = {{ json_module.dumps(active_slave_instance_list, sort_keys=True) }}
[frontend-information]
{% for frontend in frontend_list %}
{% set section_part = '${request-' + frontend %}
{{ frontend }}-node-information-json = {{ section_part }}:connection-node-information-json}
{% endfor %}
[dynamic-publish-slave-information]
< = jinja2-template-base
url = {{ software_parameter_dict['profile_replicate_publish_slave_information'] }}
filename = dynamic-publish-slave-information.cfg
extensions = jinja2.ext.do
extra-context =
section slave_information slave-information
section frontend_information frontend-information
section rejected_slave_information rejected-slave-information
section active_slave_instance_dict active-slave-instance
section warning_slave_information warning-slave-information
key slave_kedifa_information request-kedifa:connection-slave-kedifa-information
[monitor-base-url-dict]
kedifa = ${request-kedifa:connection-monitor-base-url}
{% for frontend in frontend_section_list %}
{{ frontend }} = {{ '${' + frontend + ':connection-monitor-base-url}' }}
{% endfor %}
[directory]
recipe = slapos.cookbook:mkdirectory
bin = ${buildout:directory}/bin/
srv = ${buildout:directory}/srv/
tmp = ${buildout:directory}/tmp/
backup = ${:srv}/backup
# CAUCASE directories
caucased = ${:srv}/caucased
backup-caucased = ${:backup}/caucased
# NGINX
rejected-var = ${:var}/rejected-nginx
{% if aikc_enabled %}
[directory]
aikc = ${:srv}/aikc
[aikc-config]
caucase-url = ${request-kedifa:connection-caucase-url}
csr = ${directory:aikc}/csr.pem
key = ${directory:aikc}/key.pem
ca-certificate = ${directory:aikc}/cas-ca-certificate.pem
crl = ${directory:aikc}/crl.pem
user-ca-certificate = ${directory:aikc}/user-ca-certificate.pem
user-crl = ${directory:aikc}/user-crl.pem
user-created = ${directory:aikc}/user-created
data_dir = ${directory:aikc}/caucase-updater
[aikc-user-csr]
recipe = plone.recipe.command
organization = {{ instance_parameter_dict['root-instance-title'] }}
organizational_unit = Automatic Internal Kedifa Caucase CSR
command =
if [ ! -f ${:csr} ] && [ ! -f ${:key} ] ; then
{{ software_parameter_dict['openssl'] }} req -new -sha256 \
-newkey rsa:2048 -nodes -keyout ${:key} \
-subj "/O=${:organization}/OU=${:organizational_unit}" \
-out ${:csr}
fi
update-command = ${:command}
csr = ${aikc-config:csr}
key = ${aikc-config:key}
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
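{# A quick way to inspect the CSR generated above (a sketch; the path is the
   one defined in [aikc-config], <partition> is a placeholder):
     openssl req -in <partition>/srv/aikc/csr.pem -noout -subject
   should print the /O=.../OU=Automatic Internal Kedifa Caucase CSR subject. #}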
[aikc-caucase-wrapper]
{# slapos.recipe.template:jinja2 with a context is used here instead of slapos.cookbook:wrapper in order to strip the Python u'' prefix from values #}
recipe = slapos.recipe.template:jinja2
context =
key caucase_url aikc-config:caucase-url
inline =
#!{{ software_parameter_dict['dash'] }}/bin/dash
exec {{ software_parameter_dict['bin_directory'] }}/caucase \
--ca-url {{ '{{ caucase_url }}' }} \
--ca-crt ${aikc-config:ca-certificate} \
--user-ca-crt ${aikc-config:user-ca-certificate} \
--user-crl ${aikc-config:user-crl} \
--crl ${aikc-config:crl} \
"$@"
output = ${directory:bin}/aikc-caucase-wrapper
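{# Escaping note: this profile is itself rendered by jinja2, so the quoted
   '{{ caucase_url }}' construct above survives the first rendering pass as a
   literal placeholder, which the slapos.recipe.template:jinja2 recipe then
   fills from its own context (aikc-config:caucase-url). #}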
{% do part_list.append('aikc-create-user') %}
[aikc-create-user]
recipe = plone.recipe.command
{#- The called command is smart enough to survive errors and retry #}
stop-on-error = False
update-command = ${:command}
csr_id = ${directory:aikc}/csr_id
command =
if ! [ -f ${aikc-config:user-created} ] ; then
${aikc-caucase-wrapper:output} --mode user --send-csr ${aikc-user-csr:csr} > ${:csr_id} || exit 1
cut -d ' ' -f 1 ${:csr_id} || exit 1
csr_id=`cut -d ' ' -f 1 ${:csr_id}`
sleep 1
${aikc-caucase-wrapper:output} --mode user --get-crt $csr_id ${aikc-config:key} || exit 1
touch ${aikc-config:user-created}
fi
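{# Sketch of the flow above, assuming the usual caucase CLI output (the first
   whitespace-separated field of the "--send-csr" output is the CSR id):
   1. submit the user CSR and store the returned id line in ${:csr_id},
   2. fetch the signed certificate with "--get-crt", appending it to the key
      file so key and certificate live in a single PEM,
   3. touch the user-created flag so subsequent runs become no-ops. #}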
{% do part_list.append('aikc-user-caucase-updater') %}
{% do part_list.append('aikc-user-caucase-updater-promise') %}
{{ caucase.updater(
prefix='aikc-user-caucase-updater',
buildout_bin_directory=software_parameter_dict['bin_directory'],
updater_path='${directory:service}/aikc-user-caucase-updater',
url='${aikc-config:caucase-url}',
data_dir='${aikc-config:data_dir}',
crt_path='${aikc-config:key}',
ca_path='${aikc-config:user-ca-certificate}',
crl_path='${aikc-config:user-crl}',
key_path='${aikc-config:key}',
mode='user',
)}}
[aikc-sign-promise-wrapper]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['caucase_csr_sign_check'] }}
${aikc-config:caucase-url}
${aikc-config:ca-certificate}
${aikc-config:key}
wrapper-path = ${directory:bin}/aikc-caucase-csr-sign-check
{% do part_list.append('aikc-sign-promise') %}
[aikc-sign-promise]
<= monitor-promise-base
promise = check_command_execute
name = ${:_buildout_section_name_}.py
config-command = ${aikc-sign-promise-wrapper:wrapper-path}
{% for csr in frontend_list + ['kedifa'] %}
[aikc-{{ csr }}-wrapper]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:bin}/aikc-{{ csr }}-wrapper
command-line = {{ software_parameter_dict['smart_caucase_signer'] }}
${aikc-config:caucase-url}
${aikc-config:ca-certificate}
${directory:aikc}/{{ csr }}-done
${aikc-config:key}
${request-{{ csr }}:connection-kedifa-csr-url}
"${request-{{ csr }}:connection-csr-certificate}"
{% do part_list.append('aikc-%s' % (csr,)) %}
[aikc-{{ csr }}]
recipe = plone.recipe.command
{#- The called command is smart enough to survive errors and retry #}
stop-on-error = False
command =
${aikc-{{ csr }}-wrapper:wrapper-path}
update-command = ${:command}
{% endfor %}
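{# For illustration: with a hypothetical frontend_list of ['caddy-frontend-1'],
   the loop above generates the sections [aikc-caddy-frontend-1-wrapper],
   [aikc-caddy-frontend-1], [aikc-kedifa-wrapper] and [aikc-kedifa]. #}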
{% endif %} {# if aikc_enabled #}
{% if aibcc_enabled %}
[directory]
aibcc = ${:srv}/aibcc
[aibcc-config]
caucase-url = {{ caucase_url }}
csr = ${directory:aibcc}/csr.pem
key = ${directory:aibcc}/key.pem
ca-certificate = ${directory:aibcc}/cas-ca-certificate.pem
crl = ${directory:aibcc}/crl.pem
user-ca-certificate = ${directory:aibcc}/user-ca-certificate.pem
user-crl = ${directory:aibcc}/user-crl.pem
user-created = ${directory:aibcc}/user-created
data_dir = ${directory:aibcc}/caucase-updater
[aibcc-user-csr]
recipe = plone.recipe.command
organization = {{ instance_parameter_dict['root-instance-title'] }}
organizational_unit = Automatic Sign Backend Client Caucase CSR
command =
if [ ! -f ${:csr} ] && [ ! -f ${:key} ] ; then
{{ software_parameter_dict['openssl'] }} req -new -sha256 \
-newkey rsa:2048 -nodes -keyout ${:key} \
-subj "/O=${:organization}/OU=${:organizational_unit}" \
-out ${:csr}
fi
update-command = ${:command}
csr = ${aibcc-config:csr}
key = ${aibcc-config:key}
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
[aibcc-caucase-wrapper]
{# slapos.recipe.template:jinja2 with a context is used here instead of slapos.cookbook:wrapper in order to strip the Python u'' prefix from values #}
recipe = slapos.recipe.template:jinja2
context =
key caucase_url aibcc-config:caucase-url
inline =
#!{{ software_parameter_dict['dash'] }}/bin/dash
exec {{ software_parameter_dict['bin_directory'] }}/caucase \
--ca-url {{ '{{ caucase_url }}' }} \
--ca-crt ${aibcc-config:ca-certificate} \
--user-ca-crt ${aibcc-config:user-ca-certificate} \
--user-crl ${aibcc-config:user-crl} \
--crl ${aibcc-config:crl} \
"$@"
output = ${directory:bin}/aibcc-caucase-wrapper
{% do part_list.append('aibcc-create-user') %}
[aibcc-create-user]
recipe = plone.recipe.command
# The caucase for this part is provided by this very profile, so this part
# must not fail, as otherwise caucase would never be started...
{#- XXX: Create promise #}
stop-on-error = False
update-command = ${:command}
csr_id = ${directory:aibcc}/csr_id
command =
if ! [ -f ${aibcc-config:user-created} ] ; then
${aibcc-caucase-wrapper:output} --mode user --send-csr ${aibcc-user-csr:csr} > ${:csr_id} || exit 1
cut -d ' ' -f 1 ${:csr_id} || exit 1
csr_id=`cut -d ' ' -f 1 ${:csr_id}`
sleep 1
${aibcc-caucase-wrapper:output} --mode user --get-crt $csr_id ${aibcc-config:key} || exit 1
touch ${aibcc-config:user-created}
fi
{% do part_list.append('aibcc-user-caucase-updater') %}
{% do part_list.append('aibcc-user-caucase-updater-promise') %}
{{ caucase.updater(
prefix='aibcc-user-caucase-updater',
buildout_bin_directory=software_parameter_dict['bin_directory'],
updater_path='${directory:service}/aibcc-user-caucase-updater',
url='${aibcc-config:caucase-url}',
data_dir='${aibcc-config:data_dir}',
crt_path='${aibcc-config:key}',
ca_path='${aibcc-config:user-ca-certificate}',
crl_path='${aibcc-config:user-crl}',
key_path='${aibcc-config:key}',
mode='user',
)}}
[aibcc-sign-promise-wrapper]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['caucase_csr_sign_check'] }}
${aibcc-config:caucase-url}
${aibcc-config:ca-certificate}
${aibcc-config:key}
wrapper-path = ${directory:bin}/aibcc-caucase-csr-sign-check
{% do part_list.append('aibcc-sign-promise') %}
[aibcc-sign-promise]
<= monitor-promise-base
promise = check_command_execute
name = ${:_buildout_section_name_}.py
config-command = ${aibcc-sign-promise-wrapper:wrapper-path}
{% for csr in frontend_list %}
[aibcc-{{ csr }}-wrapper]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:bin}/aibcc-{{ csr }}-wrapper
command-line = {{ software_parameter_dict['smart_caucase_signer'] }}
${aibcc-config:caucase-url}
${aibcc-config:ca-certificate}
${directory:aibcc}/{{ csr }}-done
${aibcc-config:key}
${request-{{ csr }}:connection-backend-client-csr-url}
"${request-{{ csr }}:connection-csr-certificate}"
{% do part_list.append('aibcc-%s' % (csr,)) %}
[aibcc-{{ csr }}]
recipe = plone.recipe.command
{#- The called command is smart enough to survive errors and retry #}
stop-on-error = False
command =
${aibcc-{{ csr }}-wrapper:wrapper-path}
update-command = ${:command}
{% endfor %}
{% endif %} {# if aibcc_enabled #}
[rejected-slave-json]
recipe = slapos.recipe.template:jinja2
filename = rejected-slave.json
directory = ${directory:promise-output}
output = ${:directory}/${:filename}
url = {{ software_parameter_dict['template_empty'] }}
{% if critical_rejected_slave_dict %}
{# sort_keys are important in order to avoid shuffling parameters on each run #}
content = {{ dumps(json_module.dumps(critical_rejected_slave_dict, indent=2, sort_keys=True)) }}
{% else %}
content =
{% endif %}
context =
key content :content
[directory]
service = ${:etc}/service
promise-output = ${:srv}/promise-output
[rejected-slave-publish-configuration]
ip = {{ instance_parameter_dict['ipv6-random'] }}
port = 14455
[rejected-slave-publish]
directory = ${rejected-slave-json:directory}
url = https://${rejected-slave-password:user}:${rejected-slave-password:passwd}@[${rejected-slave-publish-configuration:ip}]:${rejected-slave-publish-configuration:port}/${rejected-slave-json:filename}
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['nginx'] }}
-c ${rejected-slave-template:output}
wrapper-path = ${directory:service}/rejected-slave-publish
hash-existing-files =
${buildout:directory}/software_release/buildout.cfg
hash-files =
${rejected-slave-template:output}
${rejected-slave-certificate:certificate}
[rejected-slave-certificate]
recipe = plone.recipe.command
certificate = ${directory:etc}/rejected-slave.pem
key = ${:certificate}
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
update-command = ${:command}
command =
[ -f ${:certificate} ] && find ${:certificate} -type f -mtime +3 -delete
if ! [ -f ${:certificate} ] ; then
openssl req -new -newkey rsa:2048 -sha256 -subj \
"/CN=${rejected-slave-publish-configuration:ip}" \
-days 5 -nodes -x509 -keyout ${:certificate} -out ${:certificate}
fi
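{# Lifecycle of the self-signed certificate above: key and certificate share
   one file; any copy older than 3 days is deleted and re-created with a 5 day
   validity, leaving roughly a 2 day renewal margin before expiry. #}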
[rejected-slave-password]
recipe = slapos.cookbook:generate.password
storage-path = ${directory:etc}/.rejected-slave.passwd
bytes = 8
user = admin
[rejected-slave-htpasswd]
recipe = plone.recipe.command
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
file = ${directory:var}/nginx-rejected.htpasswd
{#- update-command is not needed, as if the ${:password} would change, the whole part will be recalculated #}
password = ${rejected-slave-password:passwd}
command = {{ software_parameter_dict['htpasswd'] }} -cb ${:file} ${rejected-slave-password:user} ${:password}
[rejected-slave-template]
recipe = slapos.recipe.template:jinja2
var = ${directory:rejected-var}
pid = ${directory:var}/nginx-rejected.pid
inline =
daemon off;
pid ${:pid};
error_log stderr;
events {
}
http {
include {{ software_parameter_dict['nginx_mime'] }};
server {
server_name_in_redirect off;
port_in_redirect off;
error_log stderr;
access_log /dev/null;
listen [${rejected-slave-publish-configuration:ip}]:${rejected-slave-publish-configuration:port} ssl;
ssl_certificate ${rejected-slave-certificate:certificate};
ssl_certificate_key ${rejected-slave-certificate:certificate};
default_type application/octet-stream;
client_body_temp_path ${:var} 1 2;
proxy_temp_path ${:var} 1 2;
fastcgi_temp_path ${:var} 1 2;
uwsgi_temp_path ${:var} 1 2;
scgi_temp_path ${:var} 1 2;
location / {
alias ${rejected-slave-json:directory}/;
autoindex off;
sendfile on;
sendfile_max_chunk 1m;
auth_basic "Rejected slave template";
auth_basic_user_file ${rejected-slave-htpasswd:file};
}
}
}
output = ${directory:etc}/nginx-rejected-slave.conf
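{# For illustration, the published rejected-slave JSON can be fetched with
   something like the following (the password comes from
   [rejected-slave-password]; -k is needed because the certificate is
   self-signed; placeholders in <> are illustrative):
     curl -k https://admin:<passwd>@[<ipv6>]:14455/rejected-slave.json #}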
[promise-rejected-slave-publish-ip-port]
<= monitor-promise-base
promise = check_socket_listening
name = rejected-slave-publish-ip-port-listening.py
config-host = ${rejected-slave-publish-configuration:ip}
config-port = ${rejected-slave-publish-configuration:port}
[rejected-slave-promise]
<= monitor-promise-base
promise = check_file_state
name = rejected-slave.py
config-filename = ${rejected-slave-json:output}
config-state = empty
config-url = ${rejected-slave-publish:url}
[caucased-backend-client]
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
{{ caucase.caucased(
prefix='caucased-backend-client',
buildout_bin_directory=software_parameter_dict['bin_directory'],
caucased_path='${directory:service}/caucased-backend-client',
backup_dir='${directory:backup-caucased}',
data_dir='${directory:caucased}',
netloc=caucase_netloc,
tmp='${directory:tmp}',
service_auto_approve_count=0,
user_auto_approve_count=1,
key_len=2048
)}}
[buildout]
extends =
{{ software_parameter_dict['profile_common'] }}
{{ software_parameter_dict['profile_monitor2'] }}
parts =
monitor-base
publish-slave-information
publish-information
request-kedifa
rejected-slave-promise
promise-rejected-slave-publish-ip-port
caucased-backend-client
caucased-backend-client-promise
{% for part in part_list %}
{{ ' %s' % part }}
{% endfor %}
instance-caddy-input-schema.json 0000664 0000000 0000000 00000010612 14241130220 0034555 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend {
"$schema": "http://json-schema.org/draft-04/schema",
"properties": {
"-frontend-quantity": {
"description": "Quantity of Frontends Replicate.",
"title": "Frontend Replication Quantity",
"type": "integer"
},
"apache-certificate": {
"description": "SSL Certificate used by the server. By appending to it CA certificate it is possible to use this field to replace not implemented apache-ca-certificate. Deprecated, please use master-key-upload-url.",
"textarea": true,
"title": "[DEPRECATED] SSL Certificate, with optional CA certificate",
"type": "string"
},
"apache-key": {
"description": "SSL Key used by the server. Deprecated, please use master-key-upload-url.",
"textarea": true,
"title": "[DEPRECATED] SSL Key",
"type": "string"
},
"domain": {
"description": "Domain used to generate automatic hostnames for slaves. For example 'example.com' will result with slave hostname 'slaveref.example.com'.",
"pattern": "^([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,6}$",
"title": "Domain",
"type": "string"
},
"enable-http2-by-default": {
"default": "true",
"description": "Use HTTP2 as default Protocol",
"enum": [
"true",
"false"
],
"title": "Enable HTTP2 by Default",
"type": "string"
},
"global-disable-http2": {
"default": "false",
"description": "Disables globally HTTP2 in Caddy, thus enable-http2-by-default here and enable-http2 have no effect. Rational is that in some loaded environments it is better to run Caddy without any HTTP2 capabilities. Warning: Changing the parameter will result in restarting Caddy process.",
"enum": [
"true",
"false"
],
"title": "Globally disable HTTP2",
"type": "string"
},
"mpm-graceful-shutdown-timeout": {
"default": 5,
"description": "Value passed to -grace parameter of Caddy, see https://caddyserver.com/docs/cli .",
"title": "Duration of the graceful shutdown period. Warning: Changing the parameter will result in restarting Caddy process.",
"type": "integer"
},
"re6st-verification-url": {
"description": "Url to verify if the internet and/or re6stnet is working.",
"title": "Test Verification URL",
"type": "string"
},
"backend-connect-timeout": {
"default": 5,
"description": "Time in seconds for establishing connection to the backend.",
"title": "Timeout for backend connection (seconds)",
"type": "integer"
},
"backend-connect-retries": {
"default": 3,
"description": "Amount of retries to connect to the backend. The amount of backend-connect-timeout*backend-connect-retries seconds will be spent to connect to the backend.",
"title": "Amount of retries to connect to the backend.",
"type": "integer"
},
"automatic-internal-kedifa-caucase-csr": {
"default": "true",
"description": "Automatically signs CSRs sent to KeDiFa's caucase, based on CSR comparison.",
"enum": [
"true",
"false"
],
"title": "Automatic Internal KeDiFa's Caucase CSR",
"type": "string"
},
"automatic-internal-backend-client-caucase-csr": {
"default": "true",
"description": "Automatically signs CSRs sent to Backend Client's caucase, based on CSR comparison.",
"enum": [
"true",
"false"
],
"title": "Automatic Internal Backend Client's Caucase CSR",
"type": "string"
},
"ciphers": {
"description": "List of ciphers. Empty defaults to Caddy list of ciphers. See https://caddyserver.com/docs/tls for more information.",
"title": "Ordered space separated list of ciphers",
"type": "string"
},
"request-timeout": {
"default": 600,
"description": "Timeout for HTTP requests.",
"title": "HTTP Request timeout in seconds",
"type": "integer"
},
"authenticate-to-backend": {
"default": "false",
"description": "If set to true the frontend certificate will be used as authentication certificate to the backend. Note: backend might have to know the frontend CA, available with 'backend-client-caucase-url'.",
"enum": [
"false",
"true"
],
"title": "Authenticate to backend",
"type": "string"
}
},
"title": "Input Parameters",
"type": "object"
}
instance-common.cfg.in 0000664 0000000 0000000 00000000701 14241130220 0032557 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend [buildout]
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
[slap_connection]
# Kept for backward compatibility
computer_id = ${slap-connection:computer-id}
partition_id = ${slap-connection:partition-id}
server_url = ${slap-connection:server-url}
software_release_url = ${slap-connection:software-release-url}
key_file = ${slap-connection:key-file}
cert_file = ${slap-connection:cert-file}
instance-kedifa.cfg.in 0000664 0000000 0000000 00000026104 14241130220 0032517 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend {%- if instance_parameter_dict['slap-software-type'] == software_type -%}
{% import "caucase" as caucase with context %}
# KeDiFa instance profile
[buildout]
extends =
{{ software_parameter_dict['profile_common'] }}
{{ software_parameter_dict['profile_monitor'] }}
{{ software_parameter_dict['profile_logrotate_base'] }}
parts =
monitor-base
directory
kedifa
logrotate-entry-kedifa
promise-kedifa-http-reply
slave-kedifa-information
caucased
caucased-promise
caucase-updater
promise-expose-csr-ip-port
promise-logrotate-setup
[monitor-instance-parameter]
# Note: Workaround for the monitor stack, which uses the monitor-httpd-port
#       parameter directly; in our case it can come from the network, thus
#       resulting in the need to strip the Python u'' prefix
monitor-httpd-port = {{ instance_parameter_dict['configuration.monitor-httpd-port'] | int }}
password = {{ instance_parameter_dict['configuration.monitor-password'] | string }}
[caucased]
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
{% set caucase_host = '[' ~ instance_parameter_dict['ipv6-random'] ~ ']' %}
{% set caucase_netloc = caucase_host ~ ':' ~ instance_parameter_dict['configuration.caucase_port'] -%}
{% set caucase_url = 'http://' ~ caucase_netloc -%}
{{ caucase.caucased(
prefix='caucased',
buildout_bin_directory=software_parameter_dict['bin_directory'],
caucased_path='${directory:service}/caucased',
backup_dir='${directory:backup-caucased}',
data_dir='${directory:caucased}',
netloc=caucase_netloc,
tmp='${directory:tmp}',
service_auto_approve_count=0,
user_auto_approve_count=1,
key_len=2048
)}}
# Create all needed directories
[directory]
recipe = slapos.cookbook:mkdirectory
bin = ${buildout:directory}/bin/
etc = ${buildout:directory}/etc/
srv = ${buildout:directory}/srv/
var = ${buildout:directory}/var/
tmp = ${buildout:directory}/tmp/
backup = ${:srv}/backup
log = ${:var}/log
run = ${:var}/run
service = ${:etc}/service
etc-run = ${:etc}/run
# KeDiFa directories
kedifa = ${:srv}/kedifa
etc-kedifa = ${:etc}/kedifa
# CAUCASE directories
caucased = ${:srv}/caucased
backup-caucased = ${:backup}/caucased
# reservation
reservation = ${:srv}/reservation
# CSR publication
expose-csr = ${:srv}/expose-csr
expose-csr-etc = ${:etc}/expose-csr
expose-csr-var = ${:var}/expose-csr
[kedifa-csr]
recipe = plone.recipe.command
organization = {{ slapparameter_dict['cluster-identification'] }}
organizational_unit = Kedifa Partition
command =
if [ ! -f ${:template-csr} ] && [ ! -f ${:key} ] ; then
/bin/bash -c '{{ software_parameter_dict['openssl'] }} req -new -sha256 \
-newkey rsa:2048 -nodes -keyout ${:key} \
-subj "/O=${:organization}/OU=${:organizational_unit}" \
-reqexts SAN \
-config <(cat {{ software_parameter_dict['openssl_cnf'] }} \
<(printf "\n[SAN]\nsubjectAltName=IP:${kedifa-config:ip}")) \
-out ${:template-csr}'
fi
update-command = ${:command}
template-csr = ${kedifa-config:template-csr}
key = ${kedifa-config:key}
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
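# Note: the command above is run through /bin/bash -c on purpose: it relies on
# process substitution ("<(...)"), a bash-only feature, to append a [SAN]
# section carrying the partition IP to the stock openssl configuration without
# writing a temporary file.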
{{ caucase.updater(
prefix='caucase-updater',
buildout_bin_directory=software_parameter_dict['bin_directory'],
updater_path='${directory:service}/caucase-updater',
url=caucase_url,
data_dir='${directory:srv}/caucase-updater',
crt_path='${kedifa-config:certificate}',
ca_path='${kedifa-config:ca-certificate}',
crl_path='${kedifa-config:crl}',
key_path='${kedifa-csr:key}',
on_renew='${kedifa-reloader:output}',
template_csr='${kedifa-csr:template-csr}'
)}}
[expose-csr-link-csr]
recipe = plone.recipe.command
filename = csr.pem
csr_path = ${directory:expose-csr}/${:filename}
stop-on-error = False
update-command = ${:command}
command =
ln -sf ${caucase-updater-csr:csr} ${:csr_path}
[expose-csr-certificate]
recipe = plone.recipe.command
certificate = ${directory:expose-csr-etc}/certificate.pem
key = ${directory:expose-csr-etc}/key.pem
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
update-command = ${:command}
command =
if ! [ -f ${:key} ] && ! [ -f ${:certificate} ] ; then
{{ software_parameter_dict['openssl'] }} req -new -newkey rsa:2048 -sha256 -subj \
"/O=${kedifa-csr:organization}/OU=${kedifa-csr:organizational_unit}/CN={{ instance_parameter_dict['ipv6-random'] }}" \
-days 5 -nodes -x509 -keyout ${:key} -out ${:certificate}
fi
[expose-csr-configuration]
ip = {{ instance_parameter_dict['ipv6-random'] }}
port = 17000
key = ${expose-csr-certificate:key}
certificate = ${expose-csr-certificate:certificate}
error-log = ${directory:log}/expose-csr.log
var = ${directory:expose-csr-var}
pid = ${directory:var}/nginx-expose-csr.pid
root = ${directory:expose-csr}
nginx_mime = {{ software_parameter_dict['nginx_mime'] }}
[expose-csr-template]
recipe = slapos.recipe.template:jinja2
output = ${directory:expose-csr-etc}/nginx.conf
url = {{ software_parameter_dict['template_expose_csr_nginx_conf'] }}
context =
section configuration expose-csr-configuration
[promise-expose-csr-ip-port]
<= monitor-promise-base
promise = check_socket_listening
name = expose-csr-ip-port-listening.py
config-host = ${expose-csr-configuration:ip}
config-port = ${expose-csr-configuration:port}
[expose-csr]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['nginx'] }}
-c ${expose-csr-template:output}
url = https://[${expose-csr-configuration:ip}]:${expose-csr-configuration:port}
wrapper-path = ${directory:service}/expose-csr
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
[expose-csr-certificate-get]
recipe = collective.recipe.shelloutput
commands =
certificate = cat ${expose-csr-certificate:certificate}
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/${:filename}
extra-context =
slapparameter_dict = {{ dumps(slapparameter_dict) }}
slap_software_type = {{ dumps(instance_parameter_dict['slap-software-type']) }}
context =
import json_module json
raw profile_common {{ software_parameter_dict['profile_common'] }}
key slap_software_type :slap_software_type
key slapparameter_dict :slapparameter_dict
section directory directory
${:extra-context}
[kedifa-config]
ip = {{ instance_parameter_dict['ipv6-random'] }}
port = {{ instance_parameter_dict['configuration.kedifa_port'] }}
db = ${directory:kedifa}/kedifa.sqlite
certificate = ${directory:etc-kedifa}/certificate.pem
key = ${:certificate}
ca-certificate = ${directory:etc-kedifa}/ca-certificate.pem
crl = ${directory:etc-kedifa}/crl.pem
template-csr = ${directory:etc-kedifa}/template-csr.pem
pidfile = ${directory:run}/kedifa.pid
logfile = ${directory:log}/kedifa.log
[kedifa-reloader]
<= jinja2-template-base
url = {{ software_parameter_dict['template_wrapper'] }}
output = ${directory:etc-run}/kedifa-reloader
command =
kill -HUP `cat ${kedifa-config:pidfile}`
extra-context =
key content :command
[promise-kedifa-http-reply]
<= monitor-promise-base
promise = check_url_available
name = kedifa-http-reply.py
# Kedifa replies 400 on /, so use it to be sure that Kedifa replied
config-http-code = 400
config-url = https://[${kedifa-config:ip}]:${kedifa-config:port}
config-ca-cert-file = ${kedifa-config:ca-certificate}
[logrotate-entry-kedifa]
<= logrotate-entry-base
name = kedifa
log = ${kedifa-config:logfile}
rotate-num = {{ instance_parameter_dict['configuration.rotate-num'] | int }}
delaycompress =
[kedifa]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['kedifa'] }}
--ip ${kedifa-config:ip}
--port ${kedifa-config:port}
--db ${kedifa-config:db}
--certificate ${kedifa-config:certificate}
--ca-certificate ${kedifa-config:ca-certificate}
--crl ${kedifa-config:crl}
--pidfile ${kedifa-config:pidfile}
--logfile ${kedifa-config:logfile}
wrapper-path = ${directory:service}/kedifa
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
# Publish KeDiFa configuration for upload and download for each slave
{%- set slave_kedifa_information = {} -%}
{%- for slave in slapparameter_dict['slave-list'] -%}
{%- set slave_reference = slave['slave_reference'] -%}
{%- set slave_dict = {} -%}
{%- do slave_dict.__setitem__('key-generate-auth-url', 'https://[${kedifa-config:ip}]:${kedifa-config:port}/${%s-auth-random:passwd}/generateauth' % (slave_reference,)) -%}
{%- do slave_dict.__setitem__('key-upload-url', 'https://[${kedifa-config:ip}]:${kedifa-config:port}/${%s-auth-random:passwd}?auth=' % (slave_reference,)) -%}
{%- do slave_dict.__setitem__('key-download-url', 'https://[${kedifa-config:ip}]:${kedifa-config:port}/${%s-auth-random:passwd}' % (slave_reference,)) -%}
{%- do slave_dict.__setitem__('kedifa-caucase-url', caucase_url ) -%}
{%- do slave_kedifa_information.__setitem__(slave_reference, slave_dict) %}
[{{ slave_reference }}-auth-random-generate]
recipe = plone.recipe.command
file = ${directory:reservation}/${:_buildout_section_name_}
command =
[ ! -f ${:file} ] && {{ software_parameter_dict['curl'] }}/bin/curl -s -g -X POST https://[${kedifa-config:ip}]:${kedifa-config:port}/reserve-id --cert ${kedifa-config:certificate} --cacert ${kedifa-config:ca-certificate} > ${:file}.tmp && mv ${:file}.tmp ${:file}
update-command = ${:command}
[{{ slave_reference }}-auth-random]
recipe = collective.recipe.shelloutput
file = {{ '${' + slave_reference }}-auth-random-generate:file}
commands =
passwd = cat ${:file} 2>/dev/null || echo "NotReadyYet"
{% endfor %}
[master-auth-random-generate]
recipe = plone.recipe.command
file = ${directory:reservation}/${:_buildout_section_name_}
command =
[ ! -f ${:file} ] && {{ software_parameter_dict['curl'] }}/bin/curl -s -g -X POST https://[${kedifa-config:ip}]:${kedifa-config:port}/reserve-id --cert ${kedifa-config:certificate} --cacert ${kedifa-config:ca-certificate} > ${:file}.tmp && mv ${:file}.tmp ${:file}
update-command = ${:command}
[master-auth-random]
recipe = collective.recipe.shelloutput
file = ${master-auth-random-generate:file}
commands =
passwd = cat ${:file} 2>/dev/null || echo "NotReadyYet"
[slave-kedifa-information]
recipe = slapos.cookbook:publish.serialised
{# sort_keys are important in order to avoid shuffling parameters on each run #}
slave-kedifa-information = {{ json_module.dumps(slave_kedifa_information, sort_keys=True) }}
caucase-url = {{ caucase_url }}
master-key-generate-auth-url = https://[${kedifa-config:ip}]:${kedifa-config:port}/${master-auth-random:passwd}/generateauth
master-key-upload-url = https://[${kedifa-config:ip}]:${kedifa-config:port}/${master-auth-random:passwd}?auth=
master-key-download-url = https://[${kedifa-config:ip}]:${kedifa-config:port}/${master-auth-random:passwd}
kedifa-csr-url = ${expose-csr:url}/${expose-csr-link-csr:filename}
csr-certificate = ${expose-csr-certificate-get:certificate}
monitor-base-url = ${monitor-instance-parameter:monitor-base-url}
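{# For illustration (hypothetical slave reference "_slave1", placeholder values
   in <>), one entry of slave-kedifa-information built above looks roughly like:
   {"_slave1": {
     "key-generate-auth-url": "https://[<ip>]:<port>/<random>/generateauth",
     "key-upload-url": "https://[<ip>]:<port>/<random>?auth=",
     "key-download-url": "https://[<ip>]:<port>/<random>",
     "kedifa-caucase-url": "http://[<ip>]:<caucase_port>"}} #}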
[promise-logrotate-setup]
<= monitor-promise-base
promise = check_command_execute
name = ${:_buildout_section_name_}.py
config-command =
${logrotate:wrapper-path} -d
{%- endif -%} {# if instance_parameter_dict['slap-software-type'] == software_type #}
instance-output-schema.json 0000664 0000000 0000000 00000005553 14241130220 0033704 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by Caddy Frontend instanciation",
"properties": {
"accepted-slave-amount": {
"description": "Amount of Slaves allocated to the Instance which are deployed",
"type": "integer"
},
"domain": {
"description": "Base domain used by the instance",
"type": "string"
},
"master-key-generate-auth-url": {
"description": "URL to GET once auth for master-key-upload-url",
"type": "string"
},
"master-key-upload-url": {
"description": "URL to PUT PEM bundle of main certificate and key",
"type": "string"
},
"monitor-base-url": {
"description": "Base url for monitor",
"type": "string"
},
"monitor-password": {
"description": "Password to access the monitor",
"type": "string"
},
"monitor-url": {
"description": "Url of Monitor (opml)",
"type": "string"
},
"monitor-user": {
"description": "User to access the monitor",
"type": "string"
},
"rejected-slave-amount": {
"description": "Rejected Amount of Slaves allocated to the Instance which are not deployed",
"type": "integer"
},
"rejected-slave-dict": {
"description": "Dict of slaves which were rejected. Keys are slave references, values are lists of errors on slaves.",
"type": "array"
},
"slave-amount": {
"description": "Total amount of Slaves allocated to the Instance (include blocked ones)",
"type": "integer"
},
"kedifa-csr-url": {
"description": "URL on which KeDiFa publishes its CSR sent to caucase.",
"type": "string"
},
"kedifa-csr-certificate": {
"description": "Certificate used to serve data on kedifa-csr-url.",
"type": "string"
},
"kedifa-caucase-url": {
"description": "Url to caucase used by KeDiFa.",
"type": "string"
},
"caddy-frontend-N-kedifa-csr-url": {
"description": "URL on which frontend node number N publishes its Kedifa CSR sent to caucase.",
"type": "string"
},
"caddy-frontend-N-backend-client-csr-url": {
"description": "URL on which frontend node number N publishes its Backend Client CSR sent to caucase.",
"type": "string"
},
"caddy-frontend-N-csr-certificate": {
"description": "Certificate used to serve data on CSRs.",
"type": "string"
},
"warning-slave-dict": {
"description": "Dict of slaves which got warnings. Keys are slave references, values are lists of warnings on slaves.",
"type": "array"
},
"warning-list": {
"description": "List of warning found during the request.",
"type": "array"
},
"backend-client-caucase-url": {
"description": "URL to caucase used by authentication to the backend.",
"type": "string"
}
},
"type": "object"
}
instance-slave-caddy-input-schema.json 0000664 0000000 0000000 00000034402 14241130220 0035670 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend {
"$schema": "http://json-schema.org/draft-04/schema",
"properties": {
"custom_domain": {
"description": "Custom Domain to use for the website. Shall contain only letters, numbers and -, and can look like example.com, first2.example.com special-site.example.com.",
"pattern": "^([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,6}$",
"title": "Custom Domain",
"type": "string"
},
"url": {
"description": "URL of the backend",
"pattern": "^(http|https|ftp)://",
"title": "Backend URL",
"type": "string"
},
"type": {
"default": "",
"description": "Type of slave. If redirect, the slave will redirect to the given URL. If zope, the rewrite rules will be compatible with Virtual Host Monster.",
"enum": [
"",
"zope",
"redirect",
"notebook",
"websocket"
],
"title": "Backend Type",
"type": "string"
},
"path": {
"default": "",
"description": "Path to proxy to in the backend",
"title": "type:zope Backend Path",
"type": "string"
},
"enable_cache": {
"default": "false",
"description": "If set to true, http caching server (Apache Traffic Server) will be used between frontend Caddy and backend",
"enum": [
"false",
"true"
],
"title": "Enable Cache",
"type": "string"
},
"https-only": {
"default": "true",
"description": "If set to true, http requests will be redirected to https",
"enum": [
"false",
"true"
],
"title": "HTTPS Only",
"type": "string"
},
"default-path": {
"default": "",
"description": "Provide default path to redirect user to when user access / (the site root)",
"title": "type:zope Default Path",
"type": "string"
},
"disable-no-cache-request": {
"default": "false",
"description": "If set to true, Cache-Control and Pragma requests headers will not be sent to cache and backend servers. This prevents clients from bypassing cache when enable_cache is true",
"enum": [
"false",
"true"
],
"title": "enable_cache: Disable 'no-cache' requests",
"type": "string"
},
"disable-via-header": {
"default": "false",
"description": "If set to true, Via response headers will not be sent to client",
"enum": [
"false",
"true"
],
"title": "enable_cache: Disable 'Via' headers from cache",
"type": "string"
},
"disabled-cookie-list": {
"default": "",
"description": "List of Cookies separated by space that will not be sent to cache and backend servers. This is especially useful to discard analytics tracking cookies when using Vary: Cookie cache headers",
"title": "Disabled Cookies",
"type": "string"
},
"enable-http2": {
"default": "true",
"description": "Use HTTP2 Protocol for the site",
"enum": [
"true",
"false"
],
"title": "Enable HTTP2 Protocol",
"type": "string"
},
"https-url": {
"description": "HTTPS URL of the backend if it is different from url parameter",
"pattern": "^(http|https|ftp)://",
"title": "HTTPS Backend URL",
"type": "string"
},
"monitor-ipv4-test": {
"default": "",
"description": "IPv4 Address for the frontend keep monitoring with ping",
"title": "IPv4 Address to Monitor Packet Lost",
"type": "string"
},
"monitor-ipv6-test": {
"default": "",
"description": "IPv6 Address for the frontend keep monitoring with ping6 (without brackets)",
"title": "IPv6 Address to Monitor Packet Lost",
"type": "string"
},
"websocket-path-list": {
"default": "",
"description": "Space separated list of path to the websocket application. If not set the whole slave will be websocket, if set then / will be HTTP, and / will be WSS. In order to have ' ' in the space use '%20'",
"title": "type:websocket Websocket Application Path List",
"type": "string"
},
"websocket-transparent": {
"default": "true",
"description": "If set to false, websocket slave will be without Caddy's transparent proxy mode. Depending on the application the setting shall be false or true. Defaults to true for transparent proxying.",
"enum": [
"false",
"true"
],
"title": "type:websocket Transparent proxy",
"type": "string"
},
"prefer-gzip-encoding-to-backend": {
"default": "false",
"description": "If set to true, frontend will rewrite Accept-Encoding request header to simply 'gzip' for all variants of Accept-Encoding containing 'gzip', in order to maximize cache hits for resources cached with Vary: Accept-Encoding when enable_cache is used",
"enum": [
"false",
"true"
],
"title": "Prefer gzip Encoding for Backend",
"type": "string"
},
"server-alias": {
"default": "",
"description": "Server Alias List separated by space",
"title": "Server Alias",
"type": "string"
},
"ssl-proxy-verify": {
"default": "false",
"description": "If set to true, Backend SSL Certificates will be checked and frontend will refuse to proxy if certificate is invalid",
"enum": [
"false",
"true"
],
"title": "Verify Backend Certificates",
"type": "string"
},
"ssl_crt": {
"default": "",
"description": "Content of the SSL Certificate file. Deprecated, please use key-upload-url.",
"textarea": true,
"title": "[DEPRECATED] SSL Certificate",
"type": "string"
},
"ssl_key": {
"default": "",
"description": "Content of the SSL Key file. Deprecated, please use key-upload-url.",
"textarea": true,
"title": "[DEPRECATED] SSL Key",
"type": "string"
},
"ssl_ca_crt": {
"default": "",
"description": "Content of the CA certificate file. Deprecated, please use key-upload-url.",
"textarea": true,
"title": "[DEPRECATED] SSL Certificate Authority's Certificate",
"type": "string"
},
"ssl_proxy_ca_crt": {
"default": "",
"description": "Content of the SSL Certificate Authority file of the backend (to be used with ssl-proxy-verify)",
"textarea": true,
"title": "SSL Backend Authority's Certificate",
"type": "string"
},
"virtualhostroot-http-port": {
"default": 80,
"description": "Port where http requests to frontend will be redirected.",
"title": "type:zope virtualhostroot-http-port",
"type": "integer"
},
"virtualhostroot-https-port": {
"default": 443,
"description": "Port where https requests to frontend will be redirected.",
"title": "type:zope virtualhostroot-https-port",
"type": "integer"
},
"backend-connect-timeout": {
"description": "Time in seconds for establishing connection to the backend.",
"title": "Timeout for backend connection (seconds)",
"type": "integer"
},
"backend-connect-retries": {
"description": "Amount of retries to connect to the backend. The amount of backend-connect-timeout*backend-connect-retries seconds will be spent to connect to the backend.",
"title": "Amount of retries to connect to the backend.",
"type": "integer"
},
"request-timeout": {
"description": "Timeout for HTTP requests.",
"title": "HTTP Request timeout in seconds",
"type": "integer"
},
"ciphers": {
"description": "List of ciphers. Empty defaults to cluster list of ciphers, which by default are Caddy list of ciphers. See https://caddyserver.com/docs/tls for more information.",
"title": "Ordered space separated list of ciphers",
"type": "string"
},
"authenticate-to-backend": {
"description": "If set to true the frontend certificate will be used as authentication certificate to the backend. Note: backend might have to know the frontend CA, available with 'backend-client-caucase-url'.",
"enum": [
"false",
"true"
],
"title": "Authenticate to backend",
"type": "string"
},
"health-check": {
"title": "Health Check",
"description": "Enables active checks of the backend. For HTTP level checks the HTTP code shall be 2xx or 3xx, otherwise backend will be considered down.",
"enum": [
"false",
"true"
],
"default": "false",
"type": "string"
},
"health-check-http-method": {
"title": "Health Check HTTP Metod",
"description": "Selects method to do the active check. CONNECT means that connection will be enough for the check, otherwise it's HTTP method.",
"enum": [
"GET",
"OPTIONS",
"POST",
"CONNECT"
],
"default": "GET",
"type": "string"
},
"health-check-http-path": {
"title": "Health Check HTTP Path",
"description": "A path on which do the active check, unused in case of CONNECT.",
"default": "/",
"type": "string"
},
"health-check-http-version": {
"title": "Health Check HTTP Version",
"description": "A HTTP version to use to check the backend, unused in case of CONNECT.",
"enum": [
"HTTP/1.1",
"HTTP/1.0"
],
"default": "HTTP/1.1",
"type": "string"
},
"health-check-timeout": {
"title": "Health Check Timeout (seconds)",
"description": "A timeout to for the request to be fulfilled, after connection happen.",
"default": "2",
"type": "integer"
},
"health-check-interval": {
"title": "Health Check Interval (seconds)",
"description": "An interval of health check.",
"default": "5",
"type": "integer"
},
"health-check-rise": {
"title": "Health Check Rise",
"description": "Amount of correct responses from the backend to consider it up.",
"default": "1",
"type": "integer"
},
"health-check-fall": {
"title": "Health Check Fall",
"description": "Amount of bad responses from the backend to consider it down.",
"default": "1",
"type": "integer"
},
"health-check-failover-url": {
"description": "URL of the failover backend",
"pattern": "^(http|https|ftp)://",
"title": "Failover backend URL",
"type": "string"
},
"health-check-failover-https-url": {
"description": "HTTPS URL of the failover backend if it is different from health-check-failover-url parameter. Note: It requires https-url to be configured, as otherwise the differentiation does not make sense..",
"pattern": "^(http|https|ftp)://",
"title": "Failover HTTPS Backend URL",
"type": "string"
},
"health-check-authenticate-to-failover-backend": {
"description": "If set to true the frontend certificate will be used as authentication certificate to the failover backend. Note: failover backend might have to know the frontend CA, available with 'backend-client-caucase-url'.",
"enum": [
"false",
"true"
],
"title": "Authenticate to failover backend",
"type": "string"
},
"health-check-failover-ssl-proxy-verify": {
"default": "false",
"description": "If set to true, failover backend SSL Certificates will be checked and frontend will refuse to proxy if certificate is invalid",
"enum": [
"false",
"true"
],
"title": "Verify failover backend certificates",
"type": "string"
},
"health-check-failover-ssl-proxy-ca-crt": {
"default": "",
"description": "Content of the SSL Certificate Authority file of the failover backend (to be used with health-check-failover-ssl-proxy-verify)",
"textarea": true,
"title": "SSL failover backend Authority's Certificate",
"type": "string"
},
"strict-transport-security": {
"title": "Strict Transport Security",
"description": "Enables Strict Transport Security (HSTS) on the slave, the default 0 results with option disabled. Setting the value enables HSTS and sets the value of max-age. More information: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security",
"default": "0",
"type": "integer"
},
"strict-transport-security-sub-domains": {
"title": "Strict Transport Security Sub Domains",
"description": "Configures Strict Transport Security for sub domains.",
"enum": [
"false",
"true"
],
"type": "string",
"default": "false"
},
"strict-transport-security-preload": {
"title": "Strict Transport Security Preload",
"description": "Configures Strict Transport Security preload mechanism.",
"enum": [
"false",
"true"
],
"type": "string",
"default": "false"
},
"url-netloc-list": {
"type": "string",
"title": "[EXPERT] List of netlocs for \"Backend URL\"",
"description": "Space separated list of netlocs (ip and port) of backend to connect to. They will share the scheme and path of the original URL and additional backend parameters (like \"SSL Backend Authority's Certificate\"). Each of them will be used, and at least one is enough for the connectivity to work, and the best results are with \"Health Check\" feature enabled. Port is mandatory, so hostnames shall be provided as hostname:port (eg. example.com:80), IPv4 - as ipv4:port (eg. 127.0.0.1:80), IPv6 - as ipv6:port (eg. ::1:80). Simply this parameters only overrides netloc (network location) of the original URL."
},
"https-url-netloc-list": {
"type": "string",
"title": "[EXPERT] List of netlocs for \"HTTPS Backend URL\"",
"description": "See \"[EXPERT] List of netlocs for \"Backend URL\"\" description."
},
"health-check-failover-url-netloc-list": {
"type": "string",
"title": "[EXPERT] List of netlocs for \"Failover backend URL\"",
"description": "See \"[EXPERT] List of netlocs for \"Backend URL\"\" description."
},
"health-check-failover-https-url-netloc-list": {
"type": "string",
"title": "[EXPERT] List of netlocs for \"Failover HTTPS Backend URL\"",
"description": "See \"[EXPERT] List of netlocs for \"Backend URL\"\" description."
}
},
"title": "Input Parameters",
"type": "object"
}
instance-slave-caddy-simplified-input-schema.json 0000664 0000000 0000000 00000006536 14241130220 0040022 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend {
"$schema": "http://json-schema.org/draft-04/schema",
"properties": {
"url": {
"description": "Url of the backend",
"pattern": "^(http|https|ftp)://",
"title": "Backend URL",
"type": "string"
},
"type": {
"default": "",
"description": "Type of slave. If redirect, the slave will redirect to the given url. If zope, the rewrite rules will be compatible with Virtual Host Monster.",
"enum": [
"",
"zope"
],
"title": "Backend Type",
"type": "string"
},
"custom_domain": {
"description": "Custom Domain to use for the website",
"pattern": "^([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,6}$",
"title": "Custom Domain",
"type": "string"
},
"server-alias": {
"default": "",
"description": "Server Alias List separated by space",
"title": "Server Alias",
"type": "string"
},
"default-path": {
"default": "",
"description": "Provide default path to redirect user to when user access / (the site root)",
"title": "type:zope Default Path",
"type": "string"
},
"disable-no-cache-request": {
"default": "false",
"description": "If set to true, Cache-Control and Pragma requests headers will not be sent to cache and backend servers. This prevents clients from bypassing cache when enable_cache is true",
"enum": [
"false",
"true"
],
"title": "enable_cache: Disable 'no-cache' requests",
"type": "string"
},
"disable-via-header": {
"default": "false",
"description": "If set to true, Via response headers will not be sent to client",
"enum": [
"false",
"true"
],
"title": "enable_cache: Disable 'Via' headers from cache",
"type": "string"
},
"disabled-cookie-list": {
"default": "",
"description": "List of Cookies separated by space that will not be sent to cache and backend servers. This is especially useful to discard analytics tracking cookies when using Vary: Cookie cache headers",
"title": "Disabled Cookies",
"type": "string"
},
"enable_cache": {
"default": "false",
"description": "If set to true, http caching server (Apache Traffic Server) will be used between frontend Caddy and backend",
"enum": [
"false",
"true"
],
"title": "Enable Cache",
"type": "string"
},
"https-only": {
"default": "true",
"description": "If set to true, http requests will be redirected to https",
"enum": [
"false",
"true"
],
"title": "HTTPS Only",
"type": "string"
},
"path": {
"default": "",
"description": "Path to proxy to in the backend",
"title": "type:zope Backend Path",
"type": "string"
},
"prefer-gzip-encoding-to-backend": {
"default": "false",
"description": "If set to true, frontend will rewrite Accept-Encoding request header to simply 'gzip' for all variants of Accept-Encoding containing 'gzip', in order to maximize cache hits for resources cached with Vary: Accept-Encoding when enable_cache is used",
"enum": [
"false",
"true"
],
"title": "Prefer gzip Encoding for Backend",
"type": "string"
}
},
"title": "Input Parameters",
"type": "object"
}
instance-slave-output-schema.json 0000664 0000000 0000000 00000003102 14241130220 0035000 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by Caddy Frontend instanciation",
"properties": {
"domain": {
"description": "Base domain used by the instance",
"type": "string"
},
"key-generate-auth-url": {
"description": "URL to GET once auth for key-upload-url",
"type": "string"
},
"key-upload-url": {
"description": "URL to PUT PEM bundle of certificate and key",
"type": "string"
},
"log-access-url": {
"description": "List of URLs to access logs",
"type": "array"
},
"replication_number": {
"description": "Number of nodes the slave is replicated",
"type": "integer"
},
"secure_access": {
"description": "URL for HTTP access",
"type": "string"
},
"site_url": {
"description": "URL for HTTP access",
"type": "string"
},
"url": {
"description": "Default URL provided",
"type": "string"
},
"request-error-list": {
"description": "In case if slave has been rejected by master or has error in the request, the list contains information about each problem",
"type": "array"
},
"warning-list": {
"description": "List of warning found during the request.",
"type": "array"
},
"kedifa-caucase-url": {
"description": "URL to caucase used by KeDiFa",
"type": "string"
},
"backend-client-caucase-url": {
"description": "URL to caucase used by authentication to the backend.",
"type": "string"
}
},
"type": "object"
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/instance.cfg.in 0000664 0000000 0000000 00000007502 14241130220 0031356 0 ustar 00root root 0000000 0000000 [buildout]
extends = {{ software_parameter_dict['profile_common'] }}
parts =
caddyprofiledeps
switch-softwaretype
[caddyprofiledeps]
recipe = caddyprofiledeps
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/${:filename}
extensions = jinja2.ext.do
extra-context =
context =
import json_module json
key slapparameter_dict slap-configuration:configuration
section instance_parameter_dict slap-configuration
section software_parameter_dict software-parameter-section
${:extra-context}
caucase-jinja2-library = {{ software_parameter_dict['caucase_jinja2_library'] }}
import-list =
file caucase :caucase-jinja2-library
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = dynamic-profile-caddy-replicate:output
RootSoftwareInstance = ${:default}
custom-personal = dynamic-profile-caddy-replicate:output
single-default = dynamic-profile-caddy-frontend:output
single-custom-personal = dynamic-profile-caddy-frontend:output
replicate = dynamic-profile-caddy-replicate:output
kedifa = dynamic-profile-kedifa:output
[software-parameter-section]
{% for key,value in software_parameter_dict.iteritems() %}
{{ key }} = {{ dumps(value) }}
{% endfor -%}
[dynamic-profile-caddy-frontend]
< = jinja2-template-base
url = {{ software_parameter_dict['profile_caddy_frontend'] }}
filename = instance-caddy-frontend.cfg
extra-context =
import furl_module furl
raw software_type single-custom-personal
[dynamic-profile-caddy-replicate]
< = jinja2-template-base
depends = ${caddyprofiledeps:recipe}
url = {{ software_parameter_dict['profile_caddy_replicate'] }}
filename = instance-caddy-replicate.cfg
extra-context =
import subprocess_module subprocess
import functools_module functools
import validators validators
import caddyprofiledummy caddyprofiledummy
# Must match the key id in [switch-softwaretype] which uses this section.
raw software_type RootSoftwareInstance-default-custom-personal-replicate
[dynamic-profile-kedifa]
< = jinja2-template-base
url = {{ software_parameter_dict['profile_kedifa'] }}
filename = instance-kedifa.cfg
extra-context =
raw software_type kedifa
[slap-configuration]
# Fetches parameters defined in SlapOS Master for this instance.
# Always the same.
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
# Define default parameter(s) that will be used later, in case the user didn't
# specify them.
# All parameters are available through the configuration.XX syntax.
# All possible parameters should have a default.
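# Illustrative example: a parameter requested as "domain" surfaces here as
# ${slap-configuration:configuration.domain} and overrides the default below.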
configuration.domain = example.org
configuration.port = 4443
configuration.plain_http_port = 8080
configuration.plain_nginx_port = 8081
configuration.nginx_port = 9443
configuration.kedifa_port = 7879
# Warning: Caucase also uses caucase_port+1
configuration.caucase_port = 8890
configuration.caucase_backend_client_port = 8990
configuration.apache-key =
configuration.apache-certificate =
configuration.open-port = 80 443
configuration.disk-cache-size = 8G
configuration.ram-cache-size = 1G
configuration.re6st-verification-url = http://[2001:67c:1254:4::1]/index.html
configuration.enable-http2-by-default = true
configuration.global-disable-http2 = false
configuration.ciphers =
configuration.request-timeout = 600
configuration.mpm-graceful-shutdown-timeout = 5
configuration.frontend-name =
configuration.backend-connect-timeout = 5
configuration.backend-connect-retries = 3
configuration.backend-haproxy-http-port = 21080
configuration.backend-haproxy-https-port = 21443
configuration.backend-haproxy-statistic-port = 21444
configuration.authenticate-to-backend = False
configuration.rotate-num = 4000
configuration.slave-introspection-https-port = 22443
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/setup.py 0000664 0000000 0000000 00000001135 14241130220 0030172 0 ustar 00root root 0000000 0000000 # The caddyprofiledeps egg allows setting dependencies of the Caddy profiles
# which are enabled during the instance run, thanks to the caddyprofiledeps
# recipe
from setuptools import setup
setup(
name='caddyprofiledeps',
install_requires=[
'validators',
'furl',
'orderedmultidict',
'caucase',
'python2-secrets',
],
entry_points={
'zc.buildout': [
'default = caddyprofiledummy:Recipe',
],
'console_scripts': [
'smart-caucase-signer = caddyprofiledummy:smart_sign',
'caucase-csr-sign-check = caddyprofiledummy:caucase_csr_sign_check'
]
}
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/software.cfg 0000664 0000000 0000000 00000015162 14241130220 0031000 0 ustar 00root root 0000000 0000000 [buildout]
extends =
buildout.hash.cfg
../../stack/slapos.cfg
../../component/dash/buildout.cfg
../../component/caddy/buildout.cfg
../../component/gzip/buildout.cfg
../../component/logrotate/buildout.cfg
../../component/trafficserver/buildout.cfg
../../component/6tunnel/buildout.cfg
../../component/xz-utils/buildout.cfg
../../component/rsyslogd/buildout.cfg
../../component/numpy/buildout.cfg
../../component/haproxy/buildout.cfg
../../component/nginx/buildout.cfg
../../stack/caucase/buildout.cfg
parts +=
caucase-eggs
template
caddyprofiledeps
kedifa
[kedifa]
recipe = zc.recipe.egg
eggs =
${python-cryptography:egg}
kedifa
[caddyprofiledeps-setup]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/setup.py
[caddyprofiledeps-dummy]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/caddyprofiledummy.py
[caddyprofiledeps-prepare]
recipe = plone.recipe.command
stop-on-error = True
location = ${buildout:parts-directory}/${:_buildout_section_name_}
update-command = ${:command}
command =
rm -fr ${:location} &&
mkdir -p ${:location} &&
cp ${caddyprofiledeps-setup:target} ${:location}/ &&
cp ${caddyprofiledeps-dummy:target} ${:location}/
[caddyprofiledeps-develop]
recipe = zc.recipe.egg:develop
setup = ${caddyprofiledeps-prepare:location}
[caddyprofiledeps]
depends = ${caddyprofiledeps-develop:recipe}
recipe = zc.recipe.egg
eggs =
caddyprofiledeps
websockify
collective.recipe.shelloutput
[profile-common]
recipe = slapos.recipe.template:jinja2
url = ${:_profile_base_location_}/instance-common.cfg.in
output = ${buildout:directory}/instance-common.cfg
context =
key develop_eggs_directory buildout:develop-eggs-directory
key eggs_directory buildout:eggs-directory
[software-parameter-section]
# libraries
caucase_jinja2_library = ${caucase-jinja2-library:target}
# profiles
profile_caddy_frontend = ${profile-caddy-frontend:target}
profile_caddy_replicate = ${profile-caddy-replicate:target}
profile_common = ${profile-common:output}
profile_kedifa = ${profile-kedifa:target}
profile_logrotate_base = ${template-logrotate-base:output}
profile_monitor = ${monitor-template:output}
profile_monitor2 = ${monitor2-template:output}
profile_replicate_publish_slave_information = ${profile-replicate-publish-slave-information:target}
profile_slave_list = ${profile-slave-list:target}
# templates
template_backend_haproxy_configuration = ${template-backend-haproxy-configuration:target}
template_backend_haproxy_rsyslogd_conf = ${template-backend-haproxy-rsyslogd-conf:target}
template_caddy_frontend_configuration = ${profile-caddy-frontend-configuration:target}
template_caddy_lazy_script_call = ${template-caddy-lazy-script-call:target}
template_configuration_state_script = ${template-configuration-state-script:target}
template_default_slave_virtualhost = ${template-default-slave-virtualhost:target}
template_empty = ${template-empty:target}
template_graceful_script = ${template-graceful-script:target}
template_not_found_html = ${template-not-found-html:target}
template_rotate_script = ${template-rotate-script:target}
template_slave_introspection_httpd_nginx = ${template-slave-introspection-httpd-nginx:target}
template_trafficserver_logging_yaml = ${template-trafficserver-logging-yaml:target}
template_trafficserver_records_config = ${template-trafficserver-records-config:target}
template_trafficserver_storage_config = ${template-trafficserver-storage-config:target}
template_validate_script = ${template-validate-script:target}
template_wrapper = ${template-wrapper:output}
template_expose_csr_nginx_conf = ${template-expose-csr-nginx-conf:target}
# directories
bin_directory = ${buildout:bin-directory}
# files
sixtunnel = ${6tunnel:location}
nginx = ${nginx-output:nginx}
nginx_mime = ${nginx-output:mime}
caddy = ${caddy:output}
haproxy_executable = ${haproxy:location}/sbin/haproxy
rsyslogd_executable = ${rsyslogd:location}/sbin/rsyslogd
curl = ${curl:location}
dash = ${dash:location}
gzip = ${gzip:location}
logrotate = ${logrotate:location}
openssl = ${openssl:location}/bin/openssl
openssl_cnf = ${openssl:location}/etc/ssl/openssl.cnf
trafficserver = ${trafficserver:location}
sha256sum = ${coreutils:location}/bin/sha256sum
kedifa = ${:bin_directory}/kedifa
kedifa-updater = ${:bin_directory}/kedifa-updater
kedifa-csr = ${:bin_directory}/kedifa-csr
xz_location = ${xz-utils:location}
htpasswd = ${:bin_directory}/htpasswd
smart_caucase_signer = ${:bin_directory}/smart-caucase-signer
caucase_csr_sign_check = ${:bin_directory}/caucase-csr-sign-check
[template]
recipe = slapos.recipe.template:jinja2
url = ${:_profile_base_location_}/instance.cfg.in
output = ${buildout:directory}/template.cfg
context =
section software_parameter_dict software-parameter-section
[profile-caddy-frontend]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/instance-apache-frontend.cfg.in
[profile-caddy-replicate]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/instance-apache-replicate.cfg.in
[profile-kedifa]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/instance-kedifa.cfg.in
[download-template]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
[profile-slave-list]
<=download-template
[profile-replicate-publish-slave-information]
<=download-template
[profile-caddy-frontend-configuration]
<=download-template
[template-not-found-html]
<=download-template
[template-default-slave-virtualhost]
<=download-template
[template-backend-haproxy-configuration]
<=download-template
[template-empty]
<=download-template
[template-slave-introspection-httpd-nginx]
<=download-template
[template-wrapper]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/templates/wrapper.in
output = ${buildout:directory}/template-wrapper.cfg
[template-trafficserver-records-config]
<=download-template
[template-trafficserver-storage-config]
<=download-template
[template-trafficserver-logging-yaml]
<=download-template
[template-rotate-script]
<=download-template
[template-caddy-lazy-script-call]
<=download-template
[template-graceful-script]
<=download-template
[template-validate-script]
<=download-template
[template-configuration-state-script]
<=download-template
[template-backend-haproxy-rsyslogd-conf]
<=download-template
[template-expose-csr-nginx-conf]
<=download-template
[versions]
kedifa = 0.0.6
# Modern KeDiFa requires zc.lockfile
zc.lockfile = 1.4
python2-secrets = 1.0.5
validators = 0.12.2
PyRSS2Gen = 1.1
cns.recipe.symlink = 0.2.3
ecdsa = 0.13
gitdb = 0.6.4
plone.recipe.command = 1.1
pycrypto = 2.6.1
smmap = 0.9.0
websockify = 0.8.0
furl = 2.1.0
orderedmultidict = 1.0.1
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/software.cfg.json 0000664 0000000 0000000 00000002655 14241130220 0031753 0 ustar 00root root 0000000 0000000 {
"description": "Caddy Frontend",
"name": "Caddy Frontend",
"serialisation": "xml",
"software-type": {
"custom-personal": {
"description": "",
"index": 2,
"request": "instance-caddy-input-schema.json",
"response": "instance-output-schema.json",
"title": "Custom Personal"
},
"custom-personal-slave": {
"description": "Custom Personal",
"index": 1,
"request": "instance-slave-caddy-input-schema.json",
"response": "instance-output-schema.json",
"shared": true,
"software-type": "custom-personal",
"title": "Custom Personal (Slave)"
},
"default": {
"description": "Default",
"index": 3,
"request": "instance-caddy-input-schema.json",
"response": "instance-output-schema.json",
"software-type": "default",
"title": "Default"
},
"default-slave": {
"description": "Default",
"index": 4,
"request": "instance-slave-caddy-input-schema.json",
"response": "instance-output-schema.json",
"shared": true,
"software-type": "default",
"title": "Default (slave)"
},
"default-simplified-slave": {
"description": "Default",
"index": 5,
"request": "instance-slave-caddy-simplified-input-schema.json",
"response": "instance-output-schema.json",
"shared": true,
"software-type": "default",
"title": "Default Basic (slave)"
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates/ 0000775 0000000 0000000 00000000000 14241130220 0030456 5 ustar 00root root 0000000 0000000 Caddyfile.in 0000664 0000000 0000000 00000002656 14241130220 0032624 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates # Main caddy configuration file
import {{ slave_configuration_directory }}/*.conf
:{{ https_port }} {
tls {{ master_certificate }} {{ master_certificate }}
bind {{ local_ipv4 }}
status 404 /
log / {{ access_log }} "{remote} - {>REMOTE_USER} [{when}] \"{method} {uri} {proto}\" {status} {size} \"{>Referer}\" \"{>User-Agent}\" {latency_ms}" {
rotate_size 10000000
}
errors {{ error_log }} {
rotate_size 10000000
* {{ not_found_file }}
}
}
:{{ http_port }} {
bind {{ local_ipv4 }}
status 404 /
log / {{ access_log }} "{remote} - {>REMOTE_USER} [{when}] \"{method} {uri} {proto}\" {status} {size} \"{>Referer}\" \"{>User-Agent}\" {latency_ms}" {
rotate_size 10000000
}
errors {{ error_log }} {
rotate_size 10000000
* {{ not_found_file }}
}
}
# Access to server-status Caddy-style
https://[{{ global_ipv6 }}]:{{ https_port }}/server-status, https://{{ local_ipv4 }}:{{ https_port }}/server-status {
tls {{ frontend_configuration['ip-access-certificate'] }} {{ frontend_configuration['ip-access-certificate'] }}
bind {{ local_ipv4 }}
basicauth "{{ username }}" {{ password | trim }} {
"Server Status"
/
}
expvar
pprof
log / {{ access_log }} "{remote} - {>REMOTE_USER} [{when}] \"{method} {uri} {proto}\" {status} {size} \"{>Referer}\" \"{>User-Agent}\" {latency_ms}" {
rotate_size 10000000
}
errors {{ error_log }} {
rotate_size 10000000
* {{ not_found_file }}
}
}
apache-custom-slave-list.cfg.in 0000664 0000000 0000000 00000074127 14241130220 0036312 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates {%- set kedifa_updater_mapping = [] %}
{%- set cached_server_dict = {} %}
{%- set backend_slave_list = [] %}
{%- set frontend_slave_list = [] %}
{%- set part_list = [] %}
{%- set cache_port = caddy_configuration.get('cache-port') %}
{%- set cache_access = "http://%s:%s/HTTP" % (instance_parameter_dict['ipv4-random'], cache_port) %}
{%- set ssl_cache_access = "http://%s:%s/HTTPS" % (instance_parameter_dict['ipv4-random'], cache_port) %}
{%- set backend_haproxy_http_url = 'http://%s:%s' % (instance_parameter_dict['ipv4-random'], backend_haproxy_configuration['http-port']) %}
{%- set backend_haproxy_https_url = 'http://%s:%s' % (instance_parameter_dict['ipv4-random'], backend_haproxy_configuration['https-port']) %}
{%- set TRUE_VALUES = ['y', 'yes', '1', 'true'] %}
{%- set generic_instance_parameter_dict = { 'cache_access': cache_access, 'local_ipv4': instance_parameter_dict['ipv4-random'], 'http_port': configuration['plain_http_port'], 'https_port': configuration['port']} %}
{%- set slave_log_dict = {} %}
{%- set slave_instance_information_list = [] %}
{%- set slave_instance_list = instance_parameter_dict['slave-instance-list'] %}
{%- if configuration['extra_slave_instance_list'] %}
{%- do slave_instance_list.extend(json_module.loads(configuration['extra_slave_instance_list'])) %}
{%- endif %}
{%- if master_key_download_url %}
{%- do kedifa_updater_mapping.append((master_key_download_url, caddy_configuration['master-certificate'], apache_certificate)) %}
{%- else %}
{%- do kedifa_updater_mapping.append(('notreadyyet', caddy_configuration['master-certificate'], apache_certificate)) %}
{%- endif %}
{%- if kedifa_configuration['slave_kedifa_information'] %}
{%- set slave_kedifa_information = json_module.loads(kedifa_configuration['slave_kedifa_information']) %}
{%- else %}
{%- set slave_kedifa_information = {} %}
{%- endif -%}
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
extensions = jinja2.ext.do
extra-context =
context =
raw profile_common {{ profile_common }}
${:extra-context}
# empty sections if no slaves are available
[slave-log-directory-dict]
[slave-password]
[slave-htpasswd]
{#- Prepare configuration parameters #}
{%- set DEFAULT_PORT = {'http': 80, 'https': 443, '': None} %}
{%- for key in ['enable-http2-by-default', 'global-disable-http2'] %}
{%- do configuration.__setitem__(key, ('' ~ configuration[key]).lower() in TRUE_VALUES) %}
{%- endfor %}
{#- Loop through the slave list to set up slaves #}
{%- for slave_instance in slave_instance_list %}
{#- Prepare slave parameters: #}
{#- * convert strings to booleans (as slapproxy and SlapOS Master differ a bit) #}
{#- * create real lists from string lists #}
{#- * setup defaults to simplify other profiles #}
{#- * stabilise values for backend #}
{%- for key, prefix in [('url', 'http_backend'), ('https-url', 'https_backend')] %}
{%- set parsed = urlparse_module.urlparse(slave_instance.get(key, '').strip()) %}
{%- set info_dict = {'scheme': parsed.scheme, 'hostname': parsed.hostname, 'port': parsed.port or DEFAULT_PORT[parsed.scheme], 'path': parsed.path, 'fragment': parsed.fragment, 'query': parsed.query, 'netloc-list': slave_instance.get(key + '-netloc-list', '').split() } %}
{%- do slave_instance.__setitem__(prefix, info_dict) %}
{%- endfor %}
{%- do slave_instance.__setitem__('ssl_proxy_verify', ('' ~ slave_instance.get('ssl-proxy-verify', '')).lower() in TRUE_VALUES) %}
{%- for key, prefix in [('health-check-failover-url', 'http_backend'), ('health-check-failover-https-url', 'https_backend')] %}
{%- set parsed = urlparse_module.urlparse(slave_instance.get(key, '').strip()) %}
{%- set info_dict = slave_instance[prefix] %}
{%- do info_dict.__setitem__('health-check-failover-scheme', parsed.scheme) %}
{%- do info_dict.__setitem__('health-check-failover-hostname', parsed.hostname) %}
{%- do info_dict.__setitem__('health-check-failover-port', parsed.port or DEFAULT_PORT[parsed.scheme]) %}
{%- do info_dict.__setitem__('health-check-failover-path', parsed.path) %}
{%- do info_dict.__setitem__('health-check-failover-query', parsed.query) %}
{%- do info_dict.__setitem__('health-check-failover-fragment', parsed.fragment) %}
{%- do info_dict.__setitem__('health-check-netloc-list', slave_instance.get('health-check-failover-url-netloc-list', '').split()) %}
{%- do slave_instance.__setitem__(prefix, info_dict) %}
{%- endfor %}
{%- do slave_instance.__setitem__('health-check-failover-ssl-proxy-verify', ('' ~ slave_instance.get('health-check-failover-ssl-proxy-verify', '')).lower() in TRUE_VALUES) %}
{%- do slave_instance.__setitem__('enable-http2', ('' ~ slave_instance.get('enable-http2', configuration['enable-http2-by-default'])).lower() in TRUE_VALUES) %}
{%- for key in ['https-only', 'websocket-transparent'] %}
{%- do slave_instance.__setitem__(key, ('' ~ slave_instance.get(key, 'true')).lower() in TRUE_VALUES) %}
{%- endfor %}
{%- for key in ['enable_cache', 'disable-no-cache-request', 'disable-via-header', 'prefer-gzip-encoding-to-backend', 'strict-transport-security-sub-domains', 'strict-transport-security-preload'] %}
{%- do slave_instance.__setitem__(key, ('' ~ slave_instance.get(key, 'false')).lower() in TRUE_VALUES) %}
{%- endfor %}
{%- for key in ['disabled-cookie-list'] %}
{%- do slave_instance.__setitem__(key, slave_instance.get(key, '').split()) %}
{%- endfor %}
{%- for key, default in [('virtualhostroot-http-port', '80'), ('virtualhostroot-https-port', '443'), ('strict-transport-security', '0')] %}
{%- do slave_instance.__setitem__(key, int(slave_instance.get(key, default))) %}
{%- endfor %}
{%- do slave_instance.__setitem__('default-path', slave_instance.get('default-path', '').strip('/') | urlencode) %}
{%- do slave_instance.__setitem__('path', slave_instance.get('path', '').strip('/')) %}
{#- Manage ciphers #}
{%- set slave_ciphers = slave_instance.get('ciphers', '').strip().split() %}
{%- if slave_ciphers %}
{%- set slave_cipher_list = ' '.join(slave_ciphers) %}
{%- else %}
{%- set slave_cipher_list = configuration['ciphers'].strip() %}
{%- endif %}
{%- do slave_instance.__setitem__('ciphers', slave_cipher_list) %}
{#- Manage common instance parameters #}
{%- set slave_type = slave_instance.get('type', '') %}
{%- set enable_cache = (slave_instance['enable_cache'] and slave_type != 'redirect') %}
{%- set slave_reference = slave_instance.get('slave_reference') %}
{%- set slave_kedifa = slave_kedifa_information.get(slave_reference) %}
{#- Setup backend URLs for front facing Caddy #}
{%- if slave_type == 'redirect' %}
{%- do slave_instance.__setitem__('backend-http-url', slave_instance.get('url', '').rstrip('/')) %}
{%- if slave_instance.get('https-url') %}
{%-     do slave_instance.__setitem__('backend-https-url', slave_instance['https-url'].rstrip('/')) %}
{%- endif %}
{%- elif enable_cache %}
{%- if 'domain' in slave_instance %}
{%- if not slave_instance.get('custom_domain') %}
{%- do slave_instance.__setitem__('custom_domain', slave_instance.get('domain')) %}
{%- endif %}
{%- endif %}
{%- do slave_instance.__setitem__('backend-http-url', cache_access) %}
{%- if slave_instance.get('https-url') %}
{%- do slave_instance.__setitem__('backend-https-url', ssl_cache_access) %}
{%- endif %}
{%-   do cached_server_dict.__setitem__(slave_reference, 'slave-instance-%s-configuration' % slave_reference) %}
{%- else %}
{%- do slave_instance.__setitem__('backend-http-url', backend_haproxy_http_url) %}
{%- if slave_instance.get('https-url') %}
{%- do slave_instance.__setitem__('backend-https-url', backend_haproxy_https_url) %}
{%- endif %}
{%- endif %}
{%- if slave_kedifa %}
{%- set key_download_url = slave_kedifa.get('key-download-url') %}
{%- else %}
{%- set key_download_url = 'notreadyyet' %}
{%- endif %}
{%- set slave_section_title = 'dynamic-template-slave-instance-%s' % slave_reference %}
{%- set slave_parameter_dict = generic_instance_parameter_dict.copy() %}
{%- set slave_publish_dict = {} %}
{%- set slave_configuration_section_name = 'slave-instance-%s-configuration' % slave_reference %}
{%- set slave_logrotate_section = slave_reference + "-logs" %}
{%- set slave_log_directory_section = slave_reference + "-log-directory" %}
{%- set slave_password_section = slave_reference + "-password" %}
{%- set slave_htpasswd_section = slave_reference + "-htpasswd" %}
{%- set slave_ln_section = slave_reference + "-ln" %}
{#- extend parts #}
{%- do part_list.extend([slave_ln_section]) %}
{%- do part_list.extend([slave_section_title]) %}
{%- set slave_log_folder = '${logrotate-directory:logrotate-backup}/' + slave_reference + "-logs" %}
{#- Pass backend timeout values #}
{%- for key in ['backend-connect-timeout', 'backend-connect-retries', 'request-timeout', 'authenticate-to-backend'] %}
{%- if slave_instance.get(key, '') == '' %}
{%- do slave_instance.__setitem__(key, configuration[key]) %}
{%- endif %}
{%- endfor %}
{%- do slave_instance.__setitem__('strict-transport-security', int(slave_instance['strict-transport-security'])) %}
{%- do slave_instance.__setitem__('authenticate-to-backend', ('' ~ slave_instance.get('authenticate-to-backend', '')).lower() in TRUE_VALUES) %}
{%- do slave_instance.__setitem__('health-check-authenticate-to-failover-backend', ('' ~ slave_instance.get('health-check-authenticate-to-failover-backend', '')).lower() in TRUE_VALUES) %}
{#- Setup active check #}
{%- do slave_instance.__setitem__('health-check', ('' ~ slave_instance.get('health-check', '')).lower() in TRUE_VALUES) %}
{%- if slave_instance['health-check'] %}
{%- if 'health-check-http-method' not in slave_instance %}
{%- do slave_instance.__setitem__('health-check-http-method', 'GET') %}
{%- endif %}
{%- if 'health-check-http-version' not in slave_instance %}
{%- do slave_instance.__setitem__('health-check-http-version', 'HTTP/1.1') %}
{%- endif %}
{%- if 'health-check-interval' not in slave_instance %}
{%- do slave_instance.__setitem__('health-check-interval', '5') %}
{%- endif %}
{%- if 'health-check-rise' not in slave_instance %}
{%- do slave_instance.__setitem__('health-check-rise', '1') %}
{%- endif %}
{%- if 'health-check-fall' not in slave_instance %}
{%- do slave_instance.__setitem__('health-check-fall', '2') %}
{%- endif %}
{%- if 'health-check-timeout' not in slave_instance %}
{%- do slave_instance.__setitem__('health-check-timeout', '2') %}
{%- endif %}
{%- do slave_instance.__setitem__('health-check-http-path', slave_instance.get('health-check-http-path') or '/') %}
{%- else %}
{%- do slave_instance.__setitem__('health-check-http-method', '') %}
{%- do slave_instance.__setitem__('health-check-http-version', '') %}
{%- do slave_instance.__setitem__('health-check-http-path', '') %}
{%- endif %} {# if slave_instance['health-check'] #}
{#- Set Up log files #}
{%- do slave_parameter_dict.__setitem__('access_log', '/'.join([caddy_log_directory, '%s_access_log' % slave_reference])) %}
{%- do slave_parameter_dict.__setitem__('error_log', '/'.join([caddy_log_directory, '%s_error_log' % slave_reference])) %}
{%- do slave_parameter_dict.__setitem__('backend_log', '/'.join([caddy_log_directory, '%s_backend_log' % slave_reference])) %}
{%- do slave_instance.__setitem__('access_log', slave_parameter_dict.get('access_log')) %}
{%- do slave_instance.__setitem__('error_log', slave_parameter_dict.get('error_log')) %}
{%- do slave_instance.__setitem__('backend_log', slave_parameter_dict.get('backend_log')) %}
{#- Add slave log directory to the slave log access dict #}
{%- do slave_log_dict.__setitem__(slave_reference, slave_log_folder) %}
{%- set furled = furl_module.furl(frontend_configuration['slave-introspection-secure_access']) %}
{%- do furled.set(username = slave_reference.lower()) %}
{%- do furled.set(password = '${'+ slave_password_section +':passwd}') %}
{%- do furled.set(path = slave_reference + '/') %}
{#- We unquote, as furl quotes automatically, but the password intentionally contains a buildout value like ${...:...} #}
{%- set slave_log_access_url = urlparse_module.unquote(furled.tostr()) %}
{%- do slave_publish_dict.__setitem__('log-access', slave_log_access_url) %}
{%- do slave_publish_dict.__setitem__('slave-reference', slave_reference) %}
{%- do slave_publish_dict.__setitem__('backend-client-caucase-url', backend_client_caucase_url) %}
{#- Set slave domain if none was defined #}
{%- if slave_instance.get('custom_domain', None) == None %}
{%- set domain_prefix = slave_instance.get('slave_reference').replace("-", "").replace("_", "").lower() %}
{%- do slave_instance.__setitem__('custom_domain', "%s.%s" % (domain_prefix, slapparameter_dict.get('domain'))) %}
{%- endif %}
{%- do slave_publish_dict.__setitem__('domain', slave_instance.get('custom_domain')) %}
{%- do slave_publish_dict.__setitem__('url', "http://%s" % slave_instance.get('custom_domain')) %}
{%- do slave_publish_dict.__setitem__('site_url', "http://%s" % slave_instance.get('custom_domain')) %}
{%- do slave_publish_dict.__setitem__('secure_access', 'https://%s' % slave_instance.get('custom_domain')) %}
{%- set host_list = slave_instance.get('server-alias', '').split() %}
{%- if slave_instance.get('custom_domain') not in host_list %}
{%- do host_list.append(slave_instance.get('custom_domain')) %}
{%- endif %}
{%- do slave_instance.__setitem__('host_list', host_list) %}
{%- do slave_instance.__setitem__('type', slave_instance.get('type', '')) %}
{%- set websocket_path_list = [] %}
{%- for websocket_path in slave_instance.get('websocket-path-list', '').split() %}
{%- set websocket_path = websocket_path.strip('/') %}
{#- Unquote the path, so %20 and similar can be represented correctly #}
{%- set websocket_path = urllib_module.unquote(websocket_path.strip()) %}
{%- if websocket_path %}
{%- do websocket_path_list.append(websocket_path) %}
{%- endif %}
{%- endfor %}
{%- do slave_instance.__setitem__('websocket-path-list', websocket_path_list) %}
{%- do slave_instance.__setitem__('enable_h2', not configuration['global-disable-http2'] and slave_instance['enable-http2']) %}
{%- if slave_instance['type'] in ['notebook', 'websocket'] %}
{#   websocket-style types need at most HTTP/1.1 #}
{%- do slave_instance.__setitem__('enable_h2', False) %}
{%- endif %}
{%- do slave_instance.__setitem__('default-path', slave_instance.get('default-path', '').strip('/') | urlencode) %}
[slave-log-directory-dict]
{{slave_reference}} = {{ slave_log_folder }}
[slave-password]
{{ slave_reference }} = {{ '${' + slave_password_section + ':passwd}' }}
[slave-htpasswd]
{{ slave_reference }} = {{ '${' + slave_htpasswd_section + ':file}' }}
{#- Set slave logrotate entry #}
[{{slave_log_directory_section}}]
recipe = slapos.cookbook:mkdirectory
log-directory = {{ '${slave-log-directory-dict:' + slave_reference + '}' }}
[{{slave_logrotate_section}}]
<= logrotate-entry-base
name = ${:_buildout_section_name_}
log = {{slave_parameter_dict.get('access_log')}} {{slave_parameter_dict.get('error_log')}} {{slave_parameter_dict.get('backend_log')}}
backup = {{ '${' + slave_log_directory_section + ':log-directory}' }}
rotate-num = {{ dumps('' ~ configuration['rotate-num']) }}
# disable delayed compression, as log filenames shall be stable
delaycompress =
{#- integrate current logs inside #}
[{{slave_ln_section}}]
recipe = plone.recipe.command
stop-on-error = false
log-directory = {{ '${' + slave_logrotate_section + ':backup}' }}
command = ln -sf {{slave_parameter_dict.get('error_log')}} ${:log-directory}/error.log && ln -sf {{slave_parameter_dict.get('access_log')}} ${:log-directory}/access.log && ln -sf {{slave_parameter_dict.get('backend_log')}} ${:log-directory}/backend.log
{#- Set password for slave #}
[{{slave_password_section}}]
recipe = slapos.cookbook:generate.password
storage-path = {{caddy_configuration_directory}}/.{{slave_reference}}.passwd
bytes = 8
[{{ slave_htpasswd_section }}]
recipe = plone.recipe.command
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
file = {{ caddy_configuration_directory }}/.{{ slave_reference }}.htpasswd
{#- update-command is not needed: if ${:password} changes, the whole part is recalculated #}
password = {{ '${' + slave_password_section + ':passwd}' }}
command = {{ software_parameter_dict['htpasswd'] }} -cb ${:file} {{ slave_reference.lower() }} ${:password}
{#- ################################################## #}
{#- Set Slave Certificates if needed #}
{#- Set certificate key for custom configuration #}
{%- set cert_name = slave_reference.replace('-','.') + '.pem' %}
{%- set certificate = '%s/%s' % (autocert, cert_name) %}
{%- do slave_parameter_dict.__setitem__('certificate', certificate )%}
{#- Set ssl certificates for each slave #}
{%- for cert_name in ('ssl_csr', 'ssl_proxy_ca_crt', 'health-check-failover-ssl-proxy-ca-crt')%}
{%- set cert_file_key = 'path_to_' + cert_name %}
{%- if cert_name in slave_instance %}
{%- set cert_title = '%s-%s' % (slave_reference, cert_name.replace('ssl_', '')) %}
{%- set cert_file = '/'.join([custom_ssl_directory, cert_title.replace('-','.')]) %}
{%- do part_list.append(cert_title) %}
{%- do slave_parameter_dict.__setitem__(cert_name, cert_file) %}
{%- do slave_instance.__setitem__(cert_file_key, cert_file) %}
{#- Store certificates on fs #}
[{{ cert_title }}]
< = jinja2-template-base
url = {{ empty_template }}
output = {{ cert_file }}
extra-context =
key content {{ cert_title + '-config:value' }}
{#- BBB: SlapOS Master non-zero knowledge BEGIN #}
{#- Store certificate in config #}
[{{ cert_title + '-config' }}]
value = {{ dumps(slave_instance.get(cert_name)) }}
{%- else %}
{%- do slave_instance.__setitem__(cert_file_key, None) %}
{%- endif %} {#- if cert_name in slave_instance #}
{%- endfor %}
{#- Set Up Certs #}
{%- if 'ssl_key' in slave_instance and 'ssl_crt' in slave_instance %}
{%- set cert_title = '%s-crt' % (slave_reference) %}
{%- set cert_file = '/'.join([directory['bbb-ssl-dir'], cert_title.replace('-','.')]) %}
{%- do kedifa_updater_mapping.append((key_download_url, certificate, cert_file)) %}
{%- do part_list.append(cert_title) %}
{%- do slave_parameter_dict.__setitem__("ssl_crt", cert_file) %}
[{{cert_title}}]
< = jinja2-template-base
url = {{ empty_template }}
output = {{ cert_file }}
cert-content = {{ dumps(slave_instance.get('ssl_crt') + '\n' + slave_instance.get('ssl_ca_crt', '') + '\n' + slave_instance.get('ssl_key')) }}
extra-context =
key content :cert-content
{%- else %}
{%- do kedifa_updater_mapping.append((key_download_url, certificate, caddy_configuration['master-certificate'])) %}
{%- endif %}
{#- BBB: SlapOS Master non-zero knowledge END #}
{#- ########################################## #}
{#- Set Slave Configuration #}
[{{ slave_configuration_section_name }}]
certificate = {{ certificate }}
https_port = {{ dumps('' ~ configuration['port']) }}
http_port = {{ dumps('' ~ configuration['plain_http_port']) }}
local_ipv4 = {{ dumps('' ~ instance_parameter_dict['ipv4-random']) }}
version-hash = {{ version_hash }}
node-id = {{ node_id }}
{%- for key, value in slave_instance.iteritems() %}
{%- if value is not none %}
{{ key }} = {{ dumps(value) }}
{%- endif %}
{%- endfor %}
[{{ slave_section_title }}]
< = jinja2-template-base
output = {{ caddy_configuration_directory }}/${:filename}
url = {{ template_default_slave_configuration }}
extra-context =
section slave_parameter {{ slave_configuration_section_name }}
filename = {{ '%s.conf' % slave_reference }}
{{ '\n' }}
{%- set monitor_ipv6_test = slave_instance.get('monitor-ipv6-test', '') %}
{%- if monitor_ipv6_test %}
{%- set monitor_ipv6_section_title = 'check-%s-ipv6-packet-list-test' % slave_instance.get('slave_reference') %}
{%- do part_list.append(monitor_ipv6_section_title) %}
[{{ monitor_ipv6_section_title }}]
<= monitor-promise-base
promise = check_icmp_packet_lost
name = {{ monitor_ipv6_section_title }}.py
config-address = {{ dumps(monitor_ipv6_test) }}
# promise frequency in minutes (2 times/day)
config-frequency = 720
{%- endif %}
{%- set monitor_ipv4_test = slave_instance.get('monitor-ipv4-test', '') %}
{%- if monitor_ipv4_test %}
{%- set monitor_ipv4_section_title = 'check-%s-ipv4-packet-list-test' % slave_instance.get('slave_reference') %}
{%- do part_list.append(monitor_ipv4_section_title) %}
[{{ monitor_ipv4_section_title }}]
<= monitor-promise-base
promise = check_icmp_packet_lost
name = {{ monitor_ipv4_section_title }}.py
config-address = {{ dumps(monitor_ipv4_test) }}
config-ipv4 = true
# promise frequency in minutes (2 times/day)
config-frequency = 720
{%- endif %}
{#- ############################### #}
{#- Publish Slave Information #}
{%- if not configuration['extra_slave_instance_list'] %}
{%- set publish_section_title = 'publish-%s-connection-information' % slave_instance.get('slave_reference') %}
{%- do part_list.append(publish_section_title) %}
[{{ publish_section_title }}]
recipe = slapos.cookbook:publish
{%- for key, value in slave_publish_dict.iteritems() %}
{{ key }} = {{ value }}
{%- endfor %}
{%- else %}
{%- do slave_instance_information_list.append(slave_publish_dict) %}
{%- endif %}
{%- do frontend_slave_list.append(slave_instance) %}
{%- if slave_type != 'redirect' %}
{%- do backend_slave_list.append(slave_instance) %}
{%- endif %}
{%- endfor %} {# Slave iteration ends for slave_instance in slave_instance_list #}
{%- do part_list.append('slave-introspection') %}
{#- ############################################## #}
{#- ## Prepare virtualhost for slaves using cache #}
{#- Define IPv6-to-IPv4 tunneling #}
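{#- Illustrative rendering with the default ports: the http_port wrapper below
    becomes "6tunnel -6 -4 -d -l <partition ipv6> 8080 <partition ipv4> 8080",
    i.e. IPv6 traffic is forwarded to the IPv4 address Caddy binds to #}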
[tunnel-6to4-base]
recipe = slapos.cookbook:wrapper
ipv4 = ${slap-configuration:ipv4-random}
ipv6 = ${slap-configuration:ipv6-random}
wrapper-path = {{ directory['service'] }}/6tunnel-${:ipv6-port}
command-line = {{ software_parameter_dict['sixtunnel'] }}/bin/6tunnel -6 -4 -d -l ${:ipv6} ${:ipv6-port} ${:ipv4} ${:ipv4-port}
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
[tunnel-6to4-base-http_port]
<= tunnel-6to4-base
ipv4-port = {{ configuration['plain_http_port'] }}
ipv6-port = {{ configuration['plain_http_port'] }}
[tunnel-6to4-base-https_port]
<= tunnel-6to4-base
ipv4-port = {{ configuration['port'] }}
ipv6-port = {{ configuration['port'] }}
[slave-introspection-parameters]
local-ipv4 = {{ dumps(instance_parameter_dict['ipv4-random']) }}
global-ipv6 = {{ dumps(global_ipv6) }}
https-port = {{ frontend_configuration['slave-introspection-https-port'] }}
ip-access-certificate = {{ frontend_configuration.get('ip-access-certificate') }}
nginx-mime = {{ software_parameter_dict['nginx_mime'] }}
access-log = {{ dumps(caddy_configuration['slave-introspection-access-log']) }}
error-log = {{ dumps(caddy_configuration['slave-introspection-error-log']) }}
var = {{ directory['slave-introspection-var'] }}
pid = {{ caddy_configuration['slave-introspection-pid-file'] }}
[slave-introspection-config]
<= jinja2-template-base
url = {{ software_parameter_dict['template_slave_introspection_httpd_nginx'] }}
output = {{ frontend_configuration['slave-introspection-configuration'] }}
extra-context =
section slave_log_directory slave-log-directory-dict
section slave_htpasswd slave-htpasswd
section parameter_dict slave-introspection-parameters
[slave-introspection]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['nginx'] }}
-c ${slave-introspection-config:output}
wrapper-path = {{ directory['service'] }}/slave-instrospection-nginx
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
{#- Publish information for the instance #}
[publish-caddy-information]
recipe = slapos.cookbook:publish.serialised
{%- if configuration['extra_slave_instance_list'] %}
{#- sort_keys are important in order to avoid shuffling parameters on each run #}
slave-instance-information-list = {{ json_module.dumps(slave_instance_information_list, sort_keys=True) }}
{%- endif %}
monitor-base-url = {{ monitor_base_url }}
kedifa-csr-url = ${expose-csr:url}/${expose-csr-link-csr-kedifa:filename}
backend-client-csr-url = ${expose-csr:url}/${expose-csr-link-csr-backend-haproxy:filename}
csr-certificate = ${expose-csr-certificate-get:certificate}
{%- set furled = furl_module.furl(backend_haproxy_configuration['statistic-frontend-secure_access']) %}
{%- do furled.set(username = backend_haproxy_configuration['statistic-username']) %}
{%- do furled.set(password = backend_haproxy_configuration['statistic-password']) %}
{%- do furled.set(path = '/') %}
{#- We unquote, as furl quotes automatically, but the password intentionally contains a buildout value like ${...:...} #}
{%- set statistic_url = urlparse_module.unquote(furled.tostr()) %}
backend-haproxy-statistic-url = {{ statistic_url }}
{#- sort_keys are important in order to avoid shuffling parameters on each run #}
node-information-json = {{ json_module.dumps(node_information, sort_keys=True) }}
[kedifa-updater]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['kedifa-updater'] }}
--server-ca-certificate {{ kedifa_configuration['ca-certificate'] }}
--identity {{ kedifa_configuration['certificate'] }}
--master-certificate {{ caddy_configuration['master-certificate'] }}
--on-update "{{ caddy_configuration['frontend-graceful-command'] }}"
${kedifa-updater-mapping:file}
{{ kedifa_configuration['kedifa-updater-state-file'] }}
wrapper-path = {{ directory['service'] }}/kedifa-updater
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
[kedifa-updater-run]
recipe = plone.recipe.command
{#- Can be stopped on error, as it does not rely on a self-provided service, but on a service coming from another partition #}
stop-on-error = True
command = {{ software_parameter_dict['kedifa-updater'] }} --prepare-only ${kedifa-updater-mapping:file} --on-update "{{ caddy_configuration['frontend-graceful-command'] }}"
update-command = ${:command}
[kedifa-updater-mapping]
recipe = slapos.recipe.template:jinja2
file = {{ kedifa_configuration['kedifa-updater-mapping-file'] }}
inline =
{%- for mapping in kedifa_updater_mapping %}
{{ mapping[0] }} {{ mapping[1] }} {{ mapping[2] }}
{%- endfor %}
output = ${:file}
##
[backend-haproxy-configuration]
< = jinja2-template-base
url = {{ template_backend_haproxy_configuration }}
output = ${backend-haproxy-config:file}
backend_slave_list = {{ dumps(sorted(backend_slave_list)) }}
extra-context =
key backend_slave_list :backend_slave_list
section configuration backend-haproxy-config
[backend-haproxy-config]
{%- for key, value in backend_haproxy_configuration.items() %}
{{ key }} = {{ value }}
{%- endfor %}
local-ipv4 = {{ dumps('' ~ instance_parameter_dict['ipv4-random']) }}
global-ipv6 = ${slap-configuration:ipv6-random}
request-timeout = {{ dumps('' ~ configuration['request-timeout']) }}
backend-connect-timeout = {{ dumps('' ~ configuration['backend-connect-timeout']) }}
backend-connect-retries = {{ dumps('' ~ configuration['backend-connect-retries']) }}
version-hash = {{ version_hash }}
node-id = {{ node_id }}
[template-expose-csr-link-csr]
recipe = plone.recipe.command
stop-on-error = False
update-command = ${:command}
csr_path = {{ directory['expose-csr'] }}/${:filename}
command =
ln -sf ${:csr} ${:csr_path}
[expose-csr-link-csr-backend-haproxy]
<= template-expose-csr-link-csr
filename = backend-haproxy-csr.pem
csr = {{ backend_haproxy_configuration['csr'] }}
[expose-csr-link-csr-kedifa]
<= template-expose-csr-link-csr
filename = kedifa-csr.pem
csr = {{ kedifa_configuration['csr'] }}
##
[buildout]
extends =
{{ profile_common }}
{{ profile_logrotate_base }}
{{ profile_monitor }}
parts +=
kedifa-updater
kedifa-updater-run
backend-haproxy-configuration
promise-logrotate-setup
{%- for part in part_list %}
{{ ' %s' % part }}
{%- endfor %}
publish-caddy-information
tunnel-6to4-base-http_port
tunnel-6to4-base-https_port
promise-expose-csr-ip-port
cache-access = {{ cache_access }}
[expose-csr-certificate]
recipe = plone.recipe.command
certificate = {{ directory['expose-csr-etc'] }}/certificate.pem
key = {{ directory['expose-csr-etc'] }}/key.pem
{#- Can be stopped on error, as it does not rely on a self-provided service #}
stop-on-error = True
update-command = ${:command}
command =
if ! [ -f ${:key} ] && ! [ -f ${:certificate} ] ; then
openssl req -new -newkey rsa:2048 -sha256 -subj \
"/O={{ expose_csr_organization }}/OU={{ expose_csr_organizational_unit }}/CN=${slap-configuration:ipv6-random}" \
-days 5 -nodes -x509 -keyout ${:key} -out ${:certificate}
fi
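# A hypothetical manual check of the generated certificate (not part of this
# profile) would be: openssl x509 -in ${:certificate} -noout -subject -enddate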
[expose-csr-configuration]
ip = ${slap-configuration:ipv6-random}
port = 17001
key = ${expose-csr-certificate:key}
certificate = ${expose-csr-certificate:certificate}
error-log = {{ directory['log'] }}/expose-csr.log
var = {{ directory['expose-csr-var'] }}
pid = {{ directory['var'] }}/nginx-expose-csr.pid
root = {{ directory['expose-csr'] }}
nginx_mime = {{ software_parameter_dict['nginx_mime'] }}
[expose-csr-template]
recipe = slapos.recipe.template:jinja2
output = {{ directory['expose-csr-etc'] }}/nginx.conf
url = {{ template_expose_csr_nginx_conf }}
context =
section configuration expose-csr-configuration
[promise-expose-csr-ip-port]
<= monitor-promise-base
promise = check_socket_listening
name = expose-csr-ip-port-listening.py
config-host = ${expose-csr-configuration:ip}
config-port = ${expose-csr-configuration:port}
[expose-csr]
recipe = slapos.cookbook:wrapper
command-line = {{ software_parameter_dict['nginx'] }}
-c ${expose-csr-template:output}
url = https://[${expose-csr-configuration:ip}]:${expose-csr-configuration:port}
wrapper-path = {{ directory['service'] }}/expose-csr
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
[expose-csr-certificate-get]
recipe = collective.recipe.shelloutput
commands =
certificate = cat ${expose-csr-certificate:certificate}
[promise-logrotate-setup]
<= monitor-promise-base
promise = check_command_execute
name = ${:_buildout_section_name_}.py
config-command =
${logrotate:wrapper-path} -d
apache-lazy-script-call.sh.in 0000664 0000000 0000000 00000000566 14241130220 0035760 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates #!/bin/bash
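# Debounce wrapper: the random sleep spreads out concurrent invocations and the
# PID file acts as a lock, so a single caller waits {{ wait_time }}s before
# running the lazy command; a stale PID file (dead process) is taken over
# instead of being skipped.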
PID=$$
PIDFILE={{ pid_file }}
sleep $((RANDOM % 10))
if [ ! -f $PIDFILE ]; then
echo $PID > $PIDFILE
sleep {{ wait_time }}
{{ lazy_command }}
rm -f $PIDFILE
else
ps --pid `cat $PIDFILE` &>/dev/null
if [ $? -eq 0 ]; then
echo "Skipped"
else
echo $PID > $PIDFILE
sleep {{ wait_time }}
{{ lazy_command }}
rm -f $PIDFILE
fi
fi
backend-haproxy-rsyslogd.conf.in 0000664 0000000 0000000 00000002177 14241130220 0036605 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates module(
load="imuxsock"
SysSock.Name="{{ configuration['log-socket'] }}")
# Simply output the raw line without any additional information, as
# haproxy emits enough information by itself.
# Also cut off the first empty space in msg (an rsyslogd internal) and cut at
# 8k, as that is the default of $MaxMessageSize
template(name="rawoutput" type="string" string="%msg:2:8192%\n")
$ActionFileDefaultTemplate rawoutput
$FileCreateMode 0600
$DirCreateMode 0700
$Umask 0022
$WorkDirectory {{ configuration['spool-directory'] }}
# Setup logging per slave, by extracting the slave name from the log stream
{%- set regex = ".*-backend (.*)-http.{0,1}(|-failover)/" %}
template(name="extract_slave_name" type="string" string="%msg:R,ERE,1,FIELD:{{ regex }}--end%")
set $!slave_name = exec_template("extract_slave_name");
template(name="slave_output" type="string" string="{{ configuration['caddy-log-directory'] }}/%$!slave_name%_backend_log")
if (re_match($msg, "{{ regex }}")) then {
action(type="omfile" dynaFile="slave_output")
stop
}
{#- emit all uncaught messages to the full log file #}
*.* {{ configuration['log-file'] }}
backend-haproxy.cfg.in 0000664 0000000 0000000 00000023610 14241130220 0034546 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates global
pidfile {{ configuration['pid-file'] }}
# master-worker is compatible with foreground with process management
master-worker
log {{ configuration['log-socket'] }} local0
defaults
log global
mode http
option httplog
timeout queue 60s
timeout server {{ configuration['request-timeout'] }}s
timeout client {{ configuration['request-timeout'] }}s
timeout connect {{ configuration['backend-connect-timeout'] }}s
retries {{ configuration['backend-connect-retries'] }}
{#- Allow starting with servers that are not resolved yet #}
default-server init-addr last,libc,none
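  # init-addr last,libc,none: try the last known address, then the libc
  # resolver, and finally start without an address when resolution fails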
{%- set SCHEME_PREFIX_MAPPING = { 'http': 'http_backend', 'https': 'https_backend'} %}
{%- macro frontend_entry(slave_instance, scheme, wildcard) %}
{#- the wildcard switch allows putting dangerous entries at the end, as haproxy uses the first match #}
{%- if slave_instance[SCHEME_PREFIX_MAPPING[scheme]]['hostname'] and slave_instance[SCHEME_PREFIX_MAPPING[scheme]]['port'] %}
{%- set matched = {'count': 0} %}
{%- for host in slave_instance['host_list'] %}
{#- Match up to the end or optional port (starting with ':') #}
{#- Please note that this matching is quite sensitive to changes and hard to test, so avoid needless changes #}
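{#- Illustrative expansion: a host entry "*.example.com" renders (wildcard pass)
      acl is_<ref>_<scheme> hdr_reg(host) -i example.com($|:.*)
    while "example.com" renders (non-wildcard pass)
      acl is_<ref>_<scheme> hdr_reg(host) -i ^example.com($|:.*) #}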
{%- if wildcard and host.startswith('*.') %}
{%- do matched.__setitem__('count', matched['count'] + 1) %}
# match wildcard {{ host }}
acl is_{{ slave_instance['slave_reference'] }}_{{ scheme }} hdr_reg(host) -i {{ host[2:] }}($|:.*)
{%- elif not wildcard and not host.startswith('*.') %}
{%- do matched.__setitem__('count', matched['count'] + 1) %}
acl is_{{ slave_instance['slave_reference'] }}_{{ scheme }} hdr_reg(host) -i ^{{ host }}($|:.*)
{%- endif %}
{%- endfor %}
{%- if matched['count'] > 0 %}
{%- if slave_instance[SCHEME_PREFIX_MAPPING[scheme]]['health-check-failover-hostname'] %}
acl is_failover_{{ slave_instance['slave_reference'] }}_{{ scheme }} nbsrv({{ slave_instance['slave_reference'] }}-{{ scheme }}) eq 0
use_backend {{ slave_instance['slave_reference'] }}-{{ scheme }} if is_{{ slave_instance['slave_reference'] }}_{{ scheme }} ! is_failover_{{ slave_instance['slave_reference'] }}_{{ scheme }}
use_backend {{ slave_instance['slave_reference'] }}-{{ scheme }}-failover if is_{{ slave_instance['slave_reference'] }}_{{ scheme }} is_failover_{{ slave_instance['slave_reference'] }}_{{ scheme }}
{%- else %}
use_backend {{ slave_instance['slave_reference'] }}-{{ scheme }} if is_{{ slave_instance['slave_reference'] }}_{{ scheme }}
{%- endif %}
{%- endif %}
{%- endif %}
{%- endmacro %}
# statistic
frontend statistic
bind {{ configuration['global-ipv6']}}:{{ configuration['statistic-port'] }} ssl crt {{ configuration['statistic-certificate'] }}
stats enable
stats uri /
stats show-desc {{ configuration['statistic-identification'] }}
stats auth {{ configuration['statistic-username'] }}:{{ configuration['statistic-password'] }}
stats realm {{ configuration['statistic-identification'] }}
stats scope http-backend
stats scope https-backend
frontend http-backend
bind {{ configuration['local-ipv4'] }}:{{ configuration['http-port'] }}
http-request add-header Via "%HV rapid-cdn-backend-{{ configuration['node-id'] }}-{{ configuration['version-hash'] }}"
http-response add-header Via "%HV rapid-cdn-backend-{{ configuration['node-id'] }}-{{ configuration['version-hash']}}"
{%- for slave_instance in backend_slave_list -%}
{{ frontend_entry(slave_instance, 'http', False) }}
{%- endfor %}
{%- for slave_instance in backend_slave_list -%}
{{ frontend_entry(slave_instance, 'http', True) }}
{%- endfor %}
frontend https-backend
bind {{ configuration['local-ipv4'] }}:{{ configuration['https-port'] }}
{%- for slave_instance in backend_slave_list -%}
{{ frontend_entry(slave_instance, 'https', False) }}
{%- endfor %}
{%- for slave_instance in backend_slave_list -%}
{{ frontend_entry(slave_instance, 'https', True) }}
{%- endfor %}
{%- for slave_instance in backend_slave_list %}
{%- for (scheme, prefix) in SCHEME_PREFIX_MAPPING.items() %}
{%- set info_dict = slave_instance[prefix] %}
{%- if info_dict['hostname'] and info_dict['port'] %}
{%- set ssl_list = [] %}
{%- if info_dict['scheme'] == 'https' %}
{%- if slave_instance['authenticate-to-backend'] %}
{%- do ssl_list.append('crt %s' % (configuration['certificate'],)) %}
{%- endif %}
{%- do ssl_list.append('ssl verify') %}
{%- if slave_instance['ssl_proxy_verify'] %}
{%- if slave_instance['path_to_ssl_proxy_ca_crt'] %}
{%- do ssl_list.append('required ca-file %s' % (slave_instance['path_to_ssl_proxy_ca_crt'],)) %}
{%- else %}
{#- Backend SSL shall be verified, but no CA was provided, so disallow the connection #}
{#- Simply dropping the hostname from the dict results in the entry being ignored... #}
{%- do info_dict.__setitem__('hostname', '') %}
{%- endif %}
{%- else %}
{%- do ssl_list.append('none') %}
{%- endif %}
{%- endif %}
backend {{ slave_instance['slave_reference'] }}-{{ scheme }}
{%- set hostname = info_dict['hostname'] %}
{%- set port = info_dict['port'] %}
{%- set path_list = [info_dict['path'].rstrip('/')] %}
{%- set query = info_dict['query'] %}
{%- if query %}
{%- do path_list.append(query) %}
{%- endif %}
{%- set path = '?'.join(path_list) %}
{%- if hostname and port or len(info_dict['netloc-list']) > 0 %}
timeout server {{ slave_instance['request-timeout'] }}s
timeout connect {{ slave_instance['backend-connect-timeout'] }}s
retries {{ slave_instance['backend-connect-retries'] }}
{%- set active_check_list = [] %}
{%- set active_check_option_list = [] %}
{%- if slave_instance['health-check'] %}
{%- do active_check_list.append('check') %}
{%- do active_check_list.append('inter %ss' % (slave_instance['health-check-interval'])) %}
{%- do active_check_list.append('rise %s' % (slave_instance['health-check-rise'])) %}
{%- do active_check_list.append('fall %s' % (slave_instance['health-check-fall'])) %}
{%- if slave_instance['health-check-http-method'] != 'CONNECT' %}
{%- do active_check_option_list.append('option httpchk %s %s %s' % (slave_instance['health-check-http-method'], slave_instance['health-check-http-path'] | urlencode, slave_instance['health-check-http-version'])) %}
{%- endif %}
{%- do active_check_option_list.append('timeout check %ss' % (slave_instance['health-check-timeout'])) %}
{%- endif %}
{%- if len(info_dict['netloc-list']) > 0 %}
{%- set counter = {'count': 1} %}
{%- for netloc in info_dict['netloc-list'] %}
server {{ slave_instance['slave_reference'] }}-backend-{{ scheme }}-{{ counter['count'] }} {{ netloc }} {{ ' '.join(ssl_list) }} {{ ' ' + ' '.join(active_check_list)}}
{%- do counter.__setitem__('count', counter['count'] + 1) %}
{%- endfor %}
{%- else %}
server {{ slave_instance['slave_reference'] }}-backend-{{ scheme }} {{ hostname }}:{{ port }} {{ ' '.join(ssl_list) }} {{ ' ' + ' '.join(active_check_list)}}
{%- endif %}
{%- for active_check_option in active_check_option_list %}
{{ active_check_option }}
{%- endfor %}
{%- if path %}
http-request set-path {{ path }}%[path]
{%- endif %}
{%- endif %}
{%- endif %}
{%- if info_dict['health-check-failover-hostname'] and info_dict['health-check-failover-port'] %}
{%- set ssl_list = [] %}
{%- if info_dict['health-check-failover-scheme'] == 'https' %}
{%- if slave_instance['health-check-authenticate-to-failover-backend'] %}
{%- do ssl_list.append('crt %s' % (configuration['certificate'],)) %}
{%- endif %}
{%- do ssl_list.append('ssl verify') %}
{%- if slave_instance['health-check-failover-ssl-proxy-verify'] %}
{%- if slave_instance['path_to_health-check-failover-ssl-proxy-ca-crt'] %}
{%- do ssl_list.append('required ca-file %s' % (slave_instance['path_to_health-check-failover-ssl-proxy-ca-crt'],)) %}
{%- else %}
{#- Backend SSL shall be verified, but no CA was provided, so disallow the connection #}
{#- Simply dropping the hostname from the dict results in the entry being ignored... #}
{%- do info_dict.__setitem__('health-check-failover-hostname', '') %}
{%- endif %}
{%- else %}
{%- do ssl_list.append('none') %}
{%- endif %}
{%- endif %}
backend {{ slave_instance['slave_reference'] }}-{{ scheme }}-failover
{%- set hostname = info_dict['health-check-failover-hostname'] %}
{%- set port = info_dict['health-check-failover-port'] %}
{%- set path_list = [info_dict['health-check-failover-path'].rstrip('/')] %}
{%- set query = info_dict['health-check-failover-query'] %}
{%- if query %}
{%- do path_list.append(query) %}
{%- endif %}
{%- set path = '?'.join(path_list) %}
{%- if hostname and port %}
{%- if len(info_dict['health-check-netloc-list']) > 0 %}
{%- set counter = {'count': 1} %}
{%- for netloc in info_dict['health-check-netloc-list'] %}
server {{ slave_instance['slave_reference'] }}-backend-{{ scheme }}-{{ counter['count'] }} {{ netloc }} {{ ' '.join(ssl_list) }}
{%- do counter.__setitem__('count', counter['count'] + 1) %}
{%- endfor %}
{%- else %}
server {{ slave_instance['slave_reference'] }}-backend-{{ scheme }} {{ hostname }}:{{ port }} {{ ' '.join(ssl_list) }}
{%- endif %}
timeout connect {{ slave_instance['backend-connect-timeout'] }}s
timeout server {{ slave_instance['request-timeout'] }}s
retries {{ slave_instance['backend-connect-retries'] }}
{%- if path %}
http-request set-path {{ path }}%[path]
{%- endif %}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- endfor %}
configuration-state-script.sh.in 0000664 0000000 0000000 00000000745 14241130220 0036635 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates #!/bin/sh
set -e
SIGNATURE_FILE={{ signature_file }}
NSIGNATURE_FILE={{ signature_file }}.tmp
touch $SIGNATURE_FILE
{{ sha256sum }} {{ path_list }} 2> /dev/null | sort -k 66 > $NSIGNATURE_FILE
if diff "$SIGNATURE_FILE" "$NSIGNATURE_FILE" > /dev/null ; then
  # No changes since last run, just propagate the information
rm -f ${NSIGNATURE_FILE}
exit 1
else
  # Changes since last run, so store the new value and propagate the information
mv "$NSIGNATURE_FILE" "$SIGNATURE_FILE"
exit 0
fi
default-virtualhost.conf.in 0000664 0000000 0000000 00000023677 14241130220 0035700 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates {%- set proxy_append_list = [('', 'Default proxy configuration')] %}
{%- if slave_parameter['prefer-gzip-encoding-to-backend'] %}
{%- do proxy_append_list.append(('prefer-gzip', 'Proxy which always overrides Accept-Encoding to gzip if such is found')) %}
{%- endif %} {#- if slave_parameter['prefer-gzip-encoding-to-backend'] #}
{%- if slave_parameter['path'].strip().strip('/') %}
{%- set zope_path = slave_parameter['path'].strip().strip('/') ~ '/' %}
{%- else %}
{%- set zope_path = '' %}
{%- endif %}
{%- set http_host_list = [] %}
{%- set https_host_list = [] %}
{%- for host in slave_parameter['host_list'] %}
{%- do http_host_list.append('http://%s:%s' % (host, slave_parameter['http_port'] )) %}
{%- do https_host_list.append('https://%s:%s' % (host, slave_parameter['https_port'] )) %}
{%- endfor %} {#- for host in slave_parameter['host_list'] #}
{%- macro proxy_header() %}
timeout {{ slave_parameter['request-timeout'] }}s
# force reset of X-Forwarded-For
header_upstream X-Forwarded-For {remote}
# workaround for lost connection to haproxy by reconnecting
try_duration 3s
try_interval 250ms
header_upstream +Via "{proto} rapid-cdn-frontend-{{ slave_parameter['node-id'] }}-{{ slave_parameter['version-hash'] }}"
{%- if not slave_parameter['disable-via-header'] %}
header_downstream +Via "{proto} rapid-cdn-frontend-{{ slave_parameter['node-id'] }}-{{ slave_parameter['version-hash'] }}"
{%- endif %}
{%- endmacro %} {# proxy_header #}
{%- macro hsts_header(tls) %}
{%- if tls %}
{%- if slave_parameter['strict-transport-security'] > 0 %}
{%- set strict_transport_security = ['max-age=%i' % (slave_parameter['strict-transport-security'],)] %}
{%- if slave_parameter['strict-transport-security-sub-domains'] %}
{%- do strict_transport_security.append('; includeSubDomains') %}
{%- endif %}
{%- if slave_parameter['strict-transport-security-preload'] %}
{%- do strict_transport_security.append('; preload') %}
{%- endif %}
header_downstream Strict-Transport-Security "{{ ''.join(strict_transport_security) }}"
{%- endif %}
{%- endif %}
{%- endmacro %} {# hsts_header #}
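{#- Example rendering (hypothetical values): with strict-transport-security=31536000
    and both strict-transport-security-sub-domains and
    strict-transport-security-preload enabled, the macro emits for tls=True:
      header_downstream Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" #}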
{%- for tls in [True, False] %}
{%- if tls %}
{%- set backend_url = slave_parameter.get('backend-https-url', slave_parameter['backend-http-url']) %}
# SSL enabled hosts
{{ https_host_list|join(', ') }} {
{%- else %}
{%- set backend_url = slave_parameter['backend-http-url'] %}
# SSL-disabled hosts
{{ http_host_list|join(', ') }} {
{%- endif %}
bind {{ slave_parameter['local_ipv4'] }}
{%- if tls %}
tls {{ slave_parameter['certificate'] }} {{ slave_parameter['certificate'] }} {
{%- if slave_parameter['ciphers'] %}
ciphers {{ slave_parameter['ciphers'] }}
{%- endif %}
{%- if slave_parameter['enable_h2'] %}
# Allow HTTP/2
alpn h2 http/1.1
{%- else %} {#- if slave_parameter['enable_h2'] #}
# Disallow HTTP/2
alpn http/1.1
{%- endif %} {#- if slave_parameter['enable_h2'] #}
} {# tls #}
{%- endif %} {#- if tls #}
log / {{ slave_parameter['access_log'] }} "{remote} - {>REMOTE_USER} [{when}] \"{method} {uri} {proto}\" {status} {size} \"{>Referer}\" \"{>User-Agent}\" {latency_ms}" {
rotate_size 10000000
}
errors {{ slave_parameter['error_log'] }} {
rotate_size 10000000
}
{%- if not (slave_parameter['type'] == 'zope' and backend_url) %}
{% if slave_parameter['prefer-gzip-encoding-to-backend'] and not (not tls and slave_parameter['https-only']) %}
rewrite {
regexp (.*)
if {>Accept-Encoding} match "(^gzip,.*|.*, gzip,.*|.*, gzip$|^gzip$)"
to /prefer-gzip{1}
}
rewrite {
regexp (.*)
if {>Accept-Encoding} not_match "(^gzip,.*|.*, gzip,.*|.*, gzip$|^gzip$)"
to {1}
}
{% elif slave_parameter['type'] not in ['notebook', 'websocket'] %}
rewrite {
regexp (.*)
to {1}
}
{% endif %} {# elif slave_parameter['type'] not in ['notebook', 'websocket'] #}
{%- endif %} {#- if not (slave_parameter['type'] == 'zope' and backend_url) #}
{%- if not tls and slave_parameter['https-only'] %}
# Enforced redirection to SSL-enabled host
redir 302 {
/ https://{host}{rewrite_uri}
}
{%- elif slave_parameter['type'] == 'zope' and backend_url %}
# Zope configuration
{%- for (proxy_name, proxy_comment) in proxy_append_list %}
# {{ proxy_comment }}
proxy "/{{ proxy_name }}" {{ backend_url }} {
{{ proxy_header() }}
{{ hsts_header(tls) }}
{%- if proxy_name == 'prefer-gzip' %}
without /prefer-gzip
header_upstream Accept-Encoding gzip
{%- endif %} {#- if proxy_name == 'prefer-gzip' #}
{%- for disabled_cookie in slave_parameter['disabled-cookie-list'] %}
# Remove cookie {{ disabled_cookie }} from client Cookies
header_upstream Cookie "(.*)(^{{ disabled_cookie }}=[^;]*; |; {{ disabled_cookie }}=[^;]*|^{{ disabled_cookie }}=[^;]*$)(.*)" "$1 $3"
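{#- e.g. (hypothetical values) with disabled_cookie=chocolate the directive
    above turns Cookie "foo=1; chocolate=2; bar=3" into "foo=1 ; bar=3" #}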
{%- endfor %} {#- for disabled_cookie in slave_parameter['disabled-cookie-list'] #}
{%- if slave_parameter['disable-via-header'] %}
header_downstream -Via
{%- endif %} {#- if slave_parameter['disable-via-header'] #}
{%- if slave_parameter['disable-no-cache-request'] %}
header_upstream -Cache-Control
header_upstream -Pragma
{%- endif %} {#- if slave_parameter['disable-no-cache-request'] #}
transparent
} {# proxy #}
{%- endfor %} {#- for (proxy_name, proxy_comment) in proxy_append_list #}
{%- if slave_parameter['default-path'] %}
redir 301 {
if {path} is /
/ {scheme}://{host}/{{ slave_parameter['default-path'] }}
} {# redir #}
{%- endif %} {#- if slave_parameter['default-path'] #}
{%- if slave_parameter['prefer-gzip-encoding-to-backend'] and not (not tls and slave_parameter['https-only']) %}
rewrite {
regexp (.*)
if {>Accept-Encoding} match "(^gzip,.*|.*, gzip,.*|.*, gzip$|^gzip$)"
{%- if tls %}
to /prefer-gzip/VirtualHostBase/{scheme}%2F{hostonly}:{{ slave_parameter['virtualhostroot-https-port'] }}%2F{{ zope_path }}VirtualHostRoot/{1}
{%- else %}
to /prefer-gzip/VirtualHostBase/{scheme}%2F{hostonly}:{{ slave_parameter['virtualhostroot-http-port'] }}%2F{{ zope_path }}VirtualHostRoot/{1}
{%- endif %}
}
rewrite {
regexp (.*)
if {>Accept-Encoding} not_match "(^gzip,.*|.*, gzip,.*|.*, gzip$|^gzip$)"
{%- if tls %}
to /VirtualHostBase/{scheme}%2F{hostonly}:{{ slave_parameter['virtualhostroot-https-port'] }}%2F{{ zope_path }}VirtualHostRoot/{1}
{%- else %}
to /VirtualHostBase/{scheme}%2F{hostonly}:{{ slave_parameter['virtualhostroot-http-port'] }}%2F{{ zope_path }}VirtualHostRoot/{1}
{%- endif %}
}
{%- else %}
rewrite {
regexp (.*)
{%- if tls %}
to /VirtualHostBase/{scheme}%2F{hostonly}:{{ slave_parameter['virtualhostroot-https-port'] }}%2F{{ zope_path }}VirtualHostRoot/{1}
{%- else %}
to /VirtualHostBase/{scheme}%2F{hostonly}:{{ slave_parameter['virtualhostroot-http-port'] }}%2F{{ zope_path }}VirtualHostRoot/{1}
{%- endif %}
} {# rewrite #}
{%- endif %} {#- if slave_parameter['prefer-gzip-encoding-to-backend'] #}
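{#- Rewrite sketch (hypothetical values): with hostonly=example.com,
    virtualhostroot-https-port=443 and zope_path='erp5/', a captured path {1}
    is rewritten to /VirtualHostBase/https%2Fexample.com:443%2Ferp5/VirtualHostRoot/{1},
    the usual Zope VirtualHostMonster form. #}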
{%- elif slave_parameter['type'] == 'redirect' %}
{%- if backend_url %}
# Redirect configuration
redir 302 {
/ {{ backend_url }}{rewrite_uri}
}
{%- endif %}
{%- elif slave_parameter['type'] == 'notebook' %}
proxy / {{ backend_url }} {
{{ proxy_header() }}
{{ hsts_header(tls) }}
transparent
}
rewrite {
regexp "/(api/kernels/[^/]+/(channels|iopub|shell|stdin)|terminals/websocket)/?"
to /proxy/{1}
}
proxy /proxy/ {{ backend_url }} {
{{ proxy_header() }}
{{ hsts_header(tls) }}
transparent
websocket
without /proxy/
}
{%- elif slave_parameter['type'] == 'websocket' %}
{%- if slave_parameter['websocket-path-list'] %}
proxy / {{ backend_url }} {
{{ proxy_header() }}
{{ hsts_header(tls) }}
{%- if slave_parameter['websocket-transparent'] %}
transparent
{%- else %}
header_upstream Host {host}
{%- endif %}
}
{%- for websocket_path in slave_parameter['websocket-path-list'] %}
proxy "/{{ websocket_path }}" {{ backend_url }} {
{{ proxy_header() }}
{{ hsts_header(tls) }}
websocket
{%- if slave_parameter['websocket-transparent'] %}
transparent
{%- else %}
header_upstream Host {host}
{%- endif %}
}
{%- endfor %}
{%- else %}
proxy / {{ backend_url }} {
{{ proxy_header() }}
{{ hsts_header(tls) }}
websocket
{%- if slave_parameter['websocket-transparent'] %}
transparent
{%- else %}
header_upstream Host {host}
{%- endif %}
}
{%- endif %}
{%- else %} {#- if slave_parameter['type'] == 'zope' and backend_url #}
# Default configuration
{%- if slave_parameter['default-path'] %}
redir 301 {
if {path} is /
/ {scheme}://{host}/{{ slave_parameter['default-path'] }}
} {# redir #}
{%- endif %} {#- if slave_parameter['default-path'] #}
{%- if backend_url %}
{%- for (proxy_name, proxy_comment) in proxy_append_list %}
# {{ proxy_comment }}
proxy "/{{ proxy_name }}" {{ backend_url }} {
{{ proxy_header() }}
{{ hsts_header(tls) }}
{%- if proxy_name == 'prefer-gzip' %}
without /prefer-gzip
header_upstream Accept-Encoding gzip
{%- endif %} {#- if proxy_name == 'prefer-gzip' #}
{%- for disabled_cookie in slave_parameter['disabled-cookie-list'] %}
# Remove cookie {{ disabled_cookie }} from client Cookies
header_upstream Cookie "(.*)(^{{ disabled_cookie }}=[^;]*; |; {{ disabled_cookie }}=[^;]*|^{{ disabled_cookie }}=[^;]*$)(.*)" "$1 $3"
{%- endfor %} {#- for disabled_cookie in slave_parameter['disabled-cookie-list'] #}
{%- if slave_parameter['disable-via-header'] %}
header_downstream -Via
{%- endif %} {#- if slave_parameter['disable-via-header'] #}
{%- if slave_parameter['disable-no-cache-request'] %}
header_upstream -Cache-Control
header_upstream -Pragma
{%- endif %} {#- if slave_parameter['disable-no-cache-request'] #}
transparent
} {# proxy #}
{%- endfor %} {#- for (proxy_name, proxy_comment) in proxy_append_list #}
{%- endif %} {#- if backend_url #}
{%- endif %} {#- if slave_parameter['type'] == 'zope' and backend_url #}
} {# https_host_list|join(', ') #}
{%- endfor %} {#- for tls in [True, False] #}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates/empty.in 0000664 0000000 0000000 00000000016 14241130220 0032141 0 ustar 00root root 0000000 0000000 {{ content }}
expose-csr-nginx.conf.in 0000664 0000000 0000000 00000001636 14241130220 0035072 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates daemon off;
pid {{ configuration['pid'] }};
error_log {{ configuration['error-log'] }};
events {
}
http {
include {{ configuration['nginx_mime'] }};
server {
server_name_in_redirect off;
port_in_redirect off;
error_log {{ configuration['error-log'] }};
access_log /dev/null;
listen [{{ configuration['ip'] }}]:{{ configuration['port'] }} ssl;
ssl_certificate {{ configuration['certificate'] }};
ssl_certificate_key {{ configuration['key'] }};
default_type application/octet-stream;
client_body_temp_path {{ configuration['var'] }} 1 2;
proxy_temp_path {{ configuration['var'] }} 1 2;
fastcgi_temp_path {{ configuration['var'] }} 1 2;
uwsgi_temp_path {{ configuration['var'] }} 1 2;
scgi_temp_path {{ configuration['var'] }} 1 2;
location / {
alias {{ configuration['root'] }}/;
autoindex off;
sendfile on;
sendfile_max_chunk 1m;
}
}
}
graceful-script.sh.in 0000664 0000000 0000000 00000000262 14241130220 0034432 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates #!/bin/sh
set -e
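# The configuration state command below exits 0 only when the watched
# configuration files changed since the last run, so the reload happens on
# change only.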
if {{ caddy_configuration_state }} ; then
echo "Reloading.."
{{ graceful_reload_command }}
else
echo "Nothing changed, so nothing to reload"
exit 0
fi
notfound.html 0000664 0000000 0000000 00000000655 14241130220 0033127 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates
Instance not found
The instance has not been found
The reasons for this could be:
the instance does not exist or the URL is incorrect
in this case please check the URL
the instance has been stopped
in this case please check in the SlapOS Master if the instance is started or wait a bit for it to start
replicate-publish-slave-information.cfg.in 0000664 0000000 0000000 00000007611 14241130220 0040541 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates {% set part_list = [] %}
{% set slave_information_dict = {} %}
# regroup slave information from all frontends
{% for frontend, slave_list_raw in slave_information.iteritems() %}
{% if slave_list_raw %}
{% set slave_list = json_module.loads(slave_list_raw) %}
{% else %}
{% set slave_list = [] %}
{% endif %}
{% for slave_dict in slave_list %}
{% set slave_reference = slave_dict.pop('slave-reference') %}
{% set log_access_url = slave_dict.pop('log-access', '') %}
{% set current_slave_dict = slave_information_dict.get(slave_reference, {}) %}
{% do current_slave_dict.update(slave_dict) %}
{% set log_access_list = current_slave_dict.get('log-access-urls', []) %}
{% do log_access_list.append( frontend + ': ' + log_access_url) %}
{% do current_slave_dict.__setitem__(
'log-access-urls',
log_access_list
) %}
{% do current_slave_dict.__setitem__(
'replication_number',
current_slave_dict.get('replication_number', 0) + 1
) %}
{% do slave_information_dict.__setitem__(slave_reference, current_slave_dict) %}
{% endfor %}
{% endfor %}
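{# Merge sketch (hypothetical values): with frontends F1 and F2 both serving
   slave S, slave_information_dict ends up as
   {'S': {..., 'log-access-urls': ['F1: url1', 'F2: url2'], 'replication_number': 2}} #}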
{% for slave_reference, rejected_info_list in rejected_slave_information['rejected-slave-dict'].iteritems() %}
{% if slave_reference not in slave_information_dict %}
{% do slave_information_dict.__setitem__(slave_reference, {}) %}
{% endif %}
{% do slave_information_dict[slave_reference].__setitem__('request-error-list', json_module.dumps(rejected_info_list)) %}
{% endfor %}
{% for slave_reference, warning_info_list in warning_slave_information['warning-slave-dict'].iteritems() %}
{% if slave_reference not in slave_information_dict %}
{% do slave_information_dict.__setitem__(slave_reference, {}) %}
{% endif %}
{% do slave_information_dict[slave_reference].__setitem__('warning-list', json_module.dumps(warning_info_list)) %}
{% endfor %}
{% for slave_reference, kedifa_dict in json_module.loads(slave_kedifa_information).iteritems() %}
{% if slave_reference not in rejected_slave_information['rejected-slave-dict'] %}
{% if slave_reference not in slave_information_dict %}
{% do slave_information_dict.__setitem__(slave_reference, {}) %}
{% endif %}
{% do slave_information_dict[slave_reference].__setitem__('key-generate-auth-url', kedifa_dict['key-generate-auth-url']) %}
{% do slave_information_dict[slave_reference].__setitem__('key-upload-url', kedifa_dict['key-upload-url']) %}
{% do slave_information_dict[slave_reference].__setitem__('kedifa-caucase-url', kedifa_dict['kedifa-caucase-url']) %}
{% endif %}
{% endfor %}
# Publish information for each slave
{% set active_slave_instance_list = json_module.loads(active_slave_instance_dict['active-slave-instance-list']) %}
{% for slave_reference, slave_information in slave_information_dict.iteritems() %}
{# Filter out destroyed slaves, i.e. slaves which do not exist anymore #}
{# Note: This functionality is not yet covered by tests, please modify with care #}
{% if slave_reference in active_slave_instance_list %}
{% set publish_section_title = 'publish-%s' % slave_reference %}
{% do part_list.append(publish_section_title) %}
[{{ publish_section_title }}]
recipe = slapos.cookbook:publish
-slave-reference = {{ slave_reference }}
{% set log_access_url = slave_information.pop('log-access-urls', None) %}
{% if log_access_url %}
{# sort_keys are important in order to avoid shuffling parameters on each run #}
log-access-url = {{ dumps(json_module.dumps(log_access_url, sort_keys=True)) }}
{% endif %}
{% for key, value in slave_information.iteritems() %}
{{ key }} = {{ dumps(value) }}
{% endfor %}
{% endif %}
{% for frontend_key, frontend_value in frontend_information.iteritems() %}
{{ frontend_key }} = {{ frontend_value }}
{% endfor %}
{% endfor %}
[buildout]
extends = {{ profile_common }}
parts =
{% for part in part_list %}
{{ ' %s' % part }}
{% endfor %}
rotate-script.sh.in 0000664 0000000 0000000 00000000702 14241130220 0034137 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates #!/bin/sh
set -e
LOG_DIR={{ log_dir }}
LOGROTATE_DIR={{ rotate_dir }}
PATTERN={{ pattern }}
COMPRESS={{ xz_binary }}
KEEP_DAYS={{ keep_days }}
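# Sketch (hypothetical values): with PATTERN='*.log' and KEEP_DAYS=30 the
# commands below move matching logs out of ${LOG_DIR}, xz-compress them in
# ${LOGROTATE_DIR} and delete files there older than 30 days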
# Move out ${PATTERN} files
find ${LOG_DIR} -maxdepth 1 -type f -name ${PATTERN} -exec mv {} ${LOGROTATE_DIR}/ \;
# Compress
find ${LOGROTATE_DIR} -maxdepth 1 -type f -name ${PATTERN} -exec ${COMPRESS} -9 {} \;
# Drop files older than ${KEEP_DAYS} days
find ${LOGROTATE_DIR} -maxdepth 1 -type f -mtime +${KEEP_DAYS} -delete
slave-introspection-httpd-nginx.conf.in 0000664 0000000 0000000 00000002466 14241130220 0040135 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates # Access log configuration
daemon off;
pid {{ parameter_dict['pid'] }};
error_log {{ parameter_dict['error-log'] }};
events {
}
http {
include {{ parameter_dict['nginx-mime'] }};
server {
server_name_in_redirect off;
port_in_redirect off;
error_log {{ parameter_dict['error-log'] }};
access_log {{ parameter_dict['access-log'] }};
listen [{{ parameter_dict['global-ipv6'] }}]:{{ parameter_dict['https-port'] }} ssl;
listen {{ parameter_dict['local-ipv4'] }}:{{ parameter_dict['https-port'] }} ssl;
ssl_certificate {{ parameter_dict['ip-access-certificate'] }};
ssl_certificate_key {{ parameter_dict['ip-access-certificate'] }};
default_type application/octet-stream;
client_body_temp_path {{ parameter_dict['var'] }} 1 2;
proxy_temp_path {{ parameter_dict['var'] }} 1 2;
fastcgi_temp_path {{ parameter_dict['var'] }} 1 2;
uwsgi_temp_path {{ parameter_dict['var'] }} 1 2;
scgi_temp_path {{ parameter_dict['var'] }} 1 2;
{% for slave, directory in slave_log_directory.iteritems() %}
location /{{ slave }} {
alias {{ directory }};
autoindex on;
autoindex_format json;
sendfile on;
sendfile_max_chunk 1m;
auth_basic "Log Access {{ slave }}";
auth_basic_user_file "{{ slave_htpasswd[slave] | trim }}";
}
{% endfor %}
}
}
trafficserver/ 0000775 0000000 0000000 00000000000 14241130220 0033244 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates logging.yaml.jinja2 0000664 0000000 0000000 00000000464 14241130220 0036736 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates/trafficserver logging:
formats:
- name: squid
# standard Traffic Server squid log format (field codes are assumed)
format: '%<cqtq> %<ttms> %<chi> %<crc>/%<pssc> %<psql> %<cqhm> %<cquc> %<caun> %<phr>/%<shn> %<psct>'
logs:
- filename: squid
format: squid
mode: ascii
rolling_enabled: 1
rolling_interval_sec: 86400
rolling_offset_hr: 0
records.config.jinja2 0000664 0000000 0000000 00000031747 14241130220 0037264 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates/trafficserver ##############################################################################
# *NOTE*: All options covered in this file should be documented in the docs:
#
# https://docs.trafficserver.apache.org/records.config
##############################################################################
##############################################################################
# SlapOS Specific configuration not available in default records.config
##############################################################################
CONFIG proxy.config.proxy_name STRING {{ ats_configuration['hostname'] }}
CONFIG proxy.config.local_state_dir STRING {{ ats_directory['local-state'] }}
CONFIG proxy.config.bin_path STRING {{ ats_directory['bin_path'] }}
CONFIG proxy.config.env_prep STRING example_prep.sh
CONFIG proxy.config.syslog_facility STRING LOG_DAEMON
CONFIG proxy.config.output.logfile STRING traffic.out
CONFIG proxy.config.admin.user_id STRING {{ '#%s' % os_module.geteuid() }}
LOCAL proxy.local.incoming_ip_to_bind STRING {{ ats_configuration['local-ip'] }}
CONFIG proxy.config.log.logfile_dir STRING {{ ats_directory['log'] }}
# Never change Server header
CONFIG proxy.config.http.response_server_enabled INT 0
# Handle Via header
CONFIG proxy.config.http.insert_request_via_str INT 1
CONFIG proxy.config.http.request_via_str STRING rapid-cdn-cache-{{ ats_configuration['node-id'] }}-{{ ats_configuration['version-hash'] }}
CONFIG proxy.config.http.insert_response_via_str INT 1
CONFIG proxy.config.http.response_via_str STRING rapid-cdn-cache-{{ ats_configuration['node-id'] }}-{{ ats_configuration['version-hash'] }}
# Implement RFC 5861 with core
CONFIG proxy.config.http.cache.open_write_fail_action INT 2
CONFIG proxy.config.body_factory.template_sets_dir STRING {{ ats_configuration['templates-dir'] }}
# Simulate stale-if-error (not supported by TrafficServer) by using an
# internal mechanism
# This results in replying with the last known non-5xx response until
# max_stale_age is reached, ignoring the max-age returned by the server
CONFIG proxy.config.http.negative_revalidating_enabled INT 1
# max_stale_age set here means that the last correct response keeps being
# served for up to 1 week
CONFIG proxy.config.http.cache.max_stale_age INT 604800
# negative_revalidating_lifetime just adds an Expires header calculated as
# Expires = Date + negative_revalidating_lifetime
# for the case when the backend replies 5xx and max-age < Age < max_stale_age;
# that is not needed here, so drop this behaviour
CONFIG proxy.config.http.negative_revalidating_lifetime INT 0
##############################################################################
# Thread configurations. Docs:
# https://docs.trafficserver.apache.org/records.config#thread-variables
##############################################################################
CONFIG proxy.config.exec_thread.autoconfig INT 1
CONFIG proxy.config.exec_thread.autoconfig.scale FLOAT 1.5
CONFIG proxy.config.exec_thread.limit INT 2
CONFIG proxy.config.accept_threads INT 1
CONFIG proxy.config.task_threads INT 2
CONFIG proxy.config.cache.threads_per_disk INT 8
CONFIG proxy.config.exec_thread.affinity INT 1
##############################################################################
# Specify server addresses and ports to bind for HTTP and HTTPS. Docs:
# https://docs.trafficserver.apache.org/records.config#proxy.config.http.server_ports
##############################################################################
CONFIG proxy.config.http.server_ports STRING {{ ats_configuration['local-ip'] + ':' + ats_configuration['input-port'] }}
##############################################################################
# Parent proxy configuration, in addition to these settings also see parent.config. Docs:
# https://docs.trafficserver.apache.org/records.config#parent-proxy-configuration
# https://docs.trafficserver.apache.org/en/latest/admin-guide/files/parent.config.en.html
##############################################################################
CONFIG proxy.config.http.parent_proxy.retry_time INT 300
CONFIG proxy.config.http.parent_proxy.connect_attempts_timeout INT 30
CONFIG proxy.config.http.forward.proxy_auth_to_parent INT 0
CONFIG proxy.config.http.uncacheable_requests_bypass_parent INT 1
##############################################################################
# HTTP connection timeouts (secs). Docs:
# https://docs.trafficserver.apache.org/records.config#http-connection-timeouts
##############################################################################
CONFIG proxy.config.http.keep_alive_no_activity_timeout_in INT 120
CONFIG proxy.config.http.keep_alive_no_activity_timeout_out INT 120
CONFIG proxy.config.http.transaction_no_activity_timeout_in INT {{ ats_configuration['request-timeout'] }}
CONFIG proxy.config.http.transaction_no_activity_timeout_out INT {{ ats_configuration['request-timeout'] }}
CONFIG proxy.config.http.transaction_active_timeout_in INT 900
CONFIG proxy.config.http.transaction_active_timeout_out INT 0
CONFIG proxy.config.http.accept_no_activity_timeout INT 120
CONFIG proxy.config.net.default_inactivity_timeout INT 86400
##############################################################################
# Origin server connect attempts. Docs:
# https://docs.trafficserver.apache.org/records.config#origin-server-connect-attempts
##############################################################################
# workaround for lost connection to haproxy by reconnecting
CONFIG proxy.config.http.connect_attempts_max_retries INT 3
CONFIG proxy.config.http.connect_attempts_max_retries_dead_server INT 1
CONFIG proxy.config.http.connect_attempts_rr_retries INT 3
CONFIG proxy.config.http.connect_attempts_timeout INT {{ ats_configuration['request-timeout'] }}
CONFIG proxy.config.http.post_connect_attempts_timeout INT {{ ats_configuration['request-timeout'] }}
CONFIG proxy.config.http.down_server.cache_time INT 60
CONFIG proxy.config.http.down_server.abort_threshold INT 10
##############################################################################
# Negative response caching, for redirects and errors. Docs:
# https://docs.trafficserver.apache.org/records.config#negative-response-caching
##############################################################################
CONFIG proxy.config.http.negative_caching_enabled INT 0
CONFIG proxy.config.http.negative_caching_lifetime INT 1800
##############################################################################
# Proxy users variables. Docs:
# https://docs.trafficserver.apache.org/records.config#proxy-user-variables
##############################################################################
CONFIG proxy.config.http.insert_client_ip INT 0
CONFIG proxy.config.http.insert_squid_x_forwarded_for INT 0
##############################################################################
# Security. Docs:
# https://docs.trafficserver.apache.org/records.config#security
##############################################################################
CONFIG proxy.config.http.push_method_enabled INT 0
##############################################################################
# Enable / disable HTTP caching. Useful for testing, but also as an
# overridable (per remap) config
##############################################################################
CONFIG proxy.config.http.cache.http INT 1
##############################################################################
# Cache control. Docs:
# https://docs.trafficserver.apache.org/records.config#cache-control
# https://docs.trafficserver.apache.org/en/latest/admin-guide/files/cache.config.en.html
##############################################################################
CONFIG proxy.config.http.cache.ignore_client_cc_max_age INT 1
CONFIG proxy.config.http.normalize_ae INT 0
CONFIG proxy.config.http.cache.cache_responses_to_cookies INT 1
CONFIG proxy.config.http.cache.cache_urls_that_look_dynamic INT 1
# https://docs.trafficserver.apache.org/records.config#proxy-config-http-cache-when-to-revalidate
CONFIG proxy.config.http.cache.when_to_revalidate INT 0
# https://docs.trafficserver.apache.org/records.config#proxy-config-http-cache-required-headers
CONFIG proxy.config.http.cache.required_headers INT 2
##############################################################################
# Heuristic cache expiration. Docs:
# https://docs.trafficserver.apache.org/records.config#heuristic-expiration
##############################################################################
CONFIG proxy.config.http.cache.heuristic_min_lifetime INT 3600
CONFIG proxy.config.http.cache.heuristic_max_lifetime INT 86400
CONFIG proxy.config.http.cache.heuristic_lm_factor FLOAT 0.10
##############################################################################
# Network. Docs:
# https://docs.trafficserver.apache.org/records.config#network
##############################################################################
CONFIG proxy.config.net.connections_throttle INT 30000
CONFIG proxy.config.net.max_connections_in INT 30000
CONFIG proxy.config.net.max_requests_in INT 10000
##############################################################################
# RAM and disk cache configurations. Docs:
# https://docs.trafficserver.apache.org/records.config#ram-cache
# https://docs.trafficserver.apache.org/en/latest/admin-guide/files/storage.config.en.html
##############################################################################
CONFIG proxy.config.cache.ram_cache.size INT {{ ats_configuration.get('ram-cache-size', '1G') }}
CONFIG proxy.config.cache.ram_cache_cutoff INT 4194304
# https://docs.trafficserver.apache.org/records.config#proxy-config-cache-limits-http-max-alts
CONFIG proxy.config.cache.limits.http.max_alts INT 5
# https://docs.trafficserver.apache.org/records.config#proxy-config-cache-max-doc-size
CONFIG proxy.config.cache.max_doc_size INT 0
CONFIG proxy.config.cache.min_average_object_size INT 8000
##############################################################################
# Logging Config. Docs:
# https://docs.trafficserver.apache.org/records.config#logging-configuration
# https://docs.trafficserver.apache.org/en/latest/admin-guide/files/logging.yaml.en.html
##############################################################################
CONFIG proxy.config.log.logging_enabled INT 3
CONFIG proxy.config.log.max_space_mb_for_logs INT 25000
CONFIG proxy.config.log.max_space_mb_headroom INT 1000
CONFIG proxy.config.log.rolling_enabled INT 1
CONFIG proxy.config.log.rolling_interval_sec INT 86400
CONFIG proxy.config.log.rolling_size_mb INT 10
CONFIG proxy.config.log.auto_delete_rolled_files INT 1
CONFIG proxy.config.log.periodic_tasks_interval INT 5
##############################################################################
# These settings control remapping, and if the proxy allows (open) forward proxy or not. Docs:
# https://docs.trafficserver.apache.org/records.config#url-remap-rules
# https://docs.trafficserver.apache.org/en/latest/admin-guide/files/remap.config.en.html
##############################################################################
CONFIG proxy.config.url_remap.remap_required INT 1
# https://docs.trafficserver.apache.org/records.config#proxy-config-url-remap-pristine-host-hdr
CONFIG proxy.config.url_remap.pristine_host_hdr INT 1
# https://docs.trafficserver.apache.org/records.config#reverse-proxy
CONFIG proxy.config.reverse_proxy.enabled INT 1
##############################################################################
# SSL Termination. Docs:
# https://docs.trafficserver.apache.org/records.config#client-related-configuration
# https://docs.trafficserver.apache.org/en/latest/admin-guide/files/ssl_multicert.config.en.html
##############################################################################
CONFIG proxy.config.ssl.client.verify.server.properties STRING NONE
CONFIG proxy.config.ssl.client.CA.cert.filename STRING NULL
CONFIG proxy.config.ssl.server.cipher_suite STRING ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
##############################################################################
# Debugging. Docs:
# https://docs.trafficserver.apache.org/records.config#diagnostic-logging-configuration
##############################################################################
CONFIG proxy.config.diags.debug.enabled INT 0
CONFIG proxy.config.diags.debug.tags STRING http|dns
# ToDo: Undocumented
CONFIG proxy.config.dump_mem_info_frequency INT 0
CONFIG proxy.config.http.slow.log.threshold INT 0
storage.config.jinja2 0000664 0000000 0000000 00000004375 14241130220 0037264 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates/trafficserver #
# storage.config - Storage Configuration file
#
# Documentation:
# https://docs.trafficserver.apache.org/en/latest/admin-guide/files/storage.config.en.html
#
# The storage configuration is a list of all the storage to
# be used by the cache.
#
#
#############################################################
# Using a file for the cache storage
#
#
#
# Where 'pathname' is full path to the directory where you want
# the cache-file to live and 'size' is size in bytes
#
# Example: 128MB cache file(/opt/slapgrid/shared/trafficserver/4e294bc2396af9b22bee270330a1f340/var/trafficserver/cache.db)
# /opt/slapgrid/shared/trafficserver/4e294bc2396af9b22bee270330a1f340/var/trafficserver 128M
#
# Example: 144MB cache file(/opt/slapgrid/shared/trafficserver/4e294bc2396af9b22bee270330a1f340/var/trafficserver/cache.db)
# assuming prefix of '/opt/slapgrid/shared/trafficserver/4e294bc2396af9b22bee270330a1f340'
# var/trafficserver 150994944
#
# Example: 512MB cache file(/opt/slapgrid/shared/trafficserver/4e294bc2396af9b22bee270330a1f340/var/trafficserver/cache.db)
# assuming prefix of '/opt/slapgrid/shared/trafficserver/4e294bc2396af9b22bee270330a1f340'
# var/trafficserver 512M
#
#
#############################################################
## O_DIRECT Specific Configuration ##
#############################################################
#
# Examples: Using O_DIRECT on disks (Linux kernel >= 2.6.3,
# FreeBSD > 5.3)
#
# /dev/disc/by-id/[Insert_ID_Here_12345] # Linux
# /dev/disc/by-path/[Insert-Path-Here:12:34:56-1.0.0.0] # Linux
#
# /dev/ada1 # FreeBSD
#
# Note that disks are identified by id or path. This is to prevent changes
# by the kernel (which could occur if a disk was simply described as /dev/sda, sdb, etc.).
#
# Also note that when using these raw devices in O_DIRECT mode, you
# do not need to specify the partition size. It's automatically
# detected.
#
# A small default cache (256MB). This is set to allow the regression test to
# succeed; most likely you'll want to use a larger cache. And we definitely
# recommend the use of raw devices for production caches.
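# Rendered sketch (hypothetical values): with cache-path=/srv/ats-cache and
# disk-cache-size=8G the line below becomes "/srv/ats-cache 8G"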
{{ ats_configuration.get("cache-path") }} {{ ats_configuration.get("disk-cache-size") }}
validate-script.sh.in 0000664 0000000 0000000 00000000633 14241130220 0034435 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates #!/bin/sh
set -e
LAST_STATE_FILE={{ last_state_file }}
# force validation every 2 hours
old_found=1
if [ -f $LAST_STATE_FILE ] ; then
old_found=$(find $LAST_STATE_FILE -mmin +120 | wc -l)
fi
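# old_found stays 1 when the state file is missing or older than 120 minutes,
# which forces a new validation below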
if [ "$old_found" -eq 1 ] || {{ configuration_state_command }} ; then
# do not catch errors during validation
set +e
{{ validate_command }}
echo $? > $LAST_STATE_FILE
set -e
fi
exit `cat $LAST_STATE_FILE`
wrapper.in 0000664 0000000 0000000 00000000044 14241130220 0032405 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/templates #!${dash-output:dash}
{{ content }}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/ 0000775 0000000 0000000 00000000000 14241130220 0027437 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/setup.py 0000664 0000000 0000000 00000004373 14241130220 0031160 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.caddy-frontend'
setup(name=name,
version=version,
description="Test for SlapOS' Caddy Frontend",
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.cookbook',
'slapos.libnetworkcache',
'erp5.util',
'requests >= 2.20.0', # needed for recent SSL certificate fixes
'urllib3 >= 1.24', # needed for recent SSL certificate fixes
# ipaddress is patching IPAddress so IPv6 in SSL certificates
# match works
'ipaddress >= 1.0.22',
'requests-toolbelt',
'supervisor',
# caucase needed to connect to the KeDiFa caucase
'caucase',
'cryptography',
'backports.lzma',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test.py 0000664 0000000 0000000 00000706447 14241130220 0031012 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import glob
import os
import requests
import httplib
from requests_toolbelt.adapters import source
import json
import multiprocessing
import subprocess
from unittest import skip
import ssl
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import time
import tempfile
import ipaddress
import StringIO
import gzip
import base64
import re
from slapos.recipe.librecipe import generateHashFromFiles
import xml.etree.ElementTree as ET
import urlparse
import socket
import sys
import logging
import random
import string
from slapos.slap.standalone import SlapOSNodeInstanceError
import caucase.client
import caucase.utils
try:
import lzma
except ImportError:
from backports import lzma
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
from slapos.testing.utils import findFreeTCPPort
from slapos.testing.utils import getPromisePluginParameterDict
if int(os.environ.get('SLAPOS_HACK_STANDALONE', '0')) == 1:
SlapOSInstanceTestCase = object
else:
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
# XXX Keep using slapos node instance --all, because of missing promises
SlapOSInstanceTestCase.slap._force_slapos_node_instance_all = True
# ports chosen to not collide with test systems
HTTP_PORT = '11080'
HTTPS_PORT = '11443'
CAUCASE_PORT = '15090'
KEDIFA_PORT = '15080'
# IP to originate requests from;
# it must not be the partition's own IP
SOURCE_IP = '127.0.0.1'
# IP on which tests run, in order to mimic HTTP[S] access
TEST_IP = os.environ['SLAPOS_TEST_IPV4']
# "--resolve" inspired from https://stackoverflow.com/a/44378047/9256748
DNS_CACHE = {}
def add_custom_dns(domain, port, ip):
port = int(port)
key = (domain, port)
value = (socket.AF_INET, 1, 6, '', (ip, port))
DNS_CACHE[key] = [value]
def new_getaddrinfo(*args):
return DNS_CACHE[args[:2]]
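# Usage sketch: add_custom_dns(domain, HTTPS_PORT, TEST_IP) followed by
# monkey-patching socket.getaddrinfo = new_getaddrinfo resolves the domain to
# TEST_IP, mimicking curl's --resolve (see fakeHTTPSResult below)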
# for development: debugging logs and install Ctrl+C handler
if os.environ.get('SLAPOS_TEST_DEBUG'):
logging.basicConfig(level=logging.DEBUG)
import unittest
unittest.installHandler()
def der2pem(der):
certificate = x509.load_der_x509_certificate(der, default_backend())
return certificate.public_bytes(serialization.Encoding.PEM)
# comes from https://stackoverflow.com/a/21788372/9256748
def patch_broken_pipe_error():
"""Monkey Patch BaseServer.handle_error to not write
a stacktrace to stderr on broken pipe.
https://stackoverflow.com/a/7913160"""
from SocketServer import BaseServer
handle_error = BaseServer.handle_error
def my_handle_error(self, request, client_address):
type, err, tb = sys.exc_info()
# there might be better ways to detect this specific error
if repr(err) == "error(32, 'Broken pipe')":
pass
else:
handle_error(self, request, client_address)
BaseServer.handle_error = my_handle_error
patch_broken_pipe_error()
def createKey():
key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend())
key_pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
return key, key_pem
def createSelfSignedCertificate(name_list):
key, key_pem = createKey()
subject_alternative_name_list = x509.SubjectAlternativeName(
[x509.DNSName(unicode(q)) for q in name_list]
)
subject = issuer = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u'Test Self Signed Certificate'),
])
certificate = x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).add_extension(
subject_alternative_name_list,
critical=False,
).public_key(
key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow() - datetime.timedelta(days=2)
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=5)
).sign(key, hashes.SHA256(), default_backend())
certificate_pem = certificate.public_bytes(serialization.Encoding.PEM)
return key, key_pem, certificate, certificate_pem
def createCSR(common_name, ip=None):
key, key_pem = createKey()
subject_alternative_name_list = []
if ip is not None:
subject_alternative_name_list.append(
x509.IPAddress(ipaddress.ip_address(unicode(ip)))
)
csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, unicode(common_name)),
]))
if len(subject_alternative_name_list):
csr = csr.add_extension(
x509.SubjectAlternativeName(subject_alternative_name_list),
critical=False
)
csr = csr.sign(key, hashes.SHA256(), default_backend())
csr_pem = csr.public_bytes(serialization.Encoding.PEM)
return key, key_pem, csr, csr_pem
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
class CertificateAuthority(object):
def __init__(self, common_name):
self.key, self.key_pem = createKey()
public_key = self.key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, unicode(common_name)),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, unicode(common_name)),
]))
builder = builder.not_valid_before(
datetime.datetime.utcnow() - datetime.timedelta(days=2))
builder = builder.not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=30))
builder = builder.serial_number(x509.random_serial_number())
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
self.certificate = builder.sign(
private_key=self.key, algorithm=hashes.SHA256(),
backend=default_backend()
)
self.certificate_pem = self.certificate.public_bytes(
serialization.Encoding.PEM)
def signCSR(self, csr):
builder = x509.CertificateBuilder(
subject_name=csr.subject,
extensions=csr.extensions,
issuer_name=self.certificate.subject,
not_valid_before=datetime.datetime.utcnow() - datetime.timedelta(days=1),
not_valid_after=datetime.datetime.utcnow() + datetime.timedelta(days=30),
serial_number=x509.random_serial_number(),
public_key=csr.public_key(),
)
certificate = builder.sign(
private_key=self.key,
algorithm=hashes.SHA256(),
backend=default_backend()
)
return certificate, certificate.public_bytes(serialization.Encoding.PEM)
def subprocess_status_output(*args, **kwargs):
prc = subprocess.Popen(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
*args,
**kwargs)
out, err = prc.communicate()
return prc.returncode, out
def subprocess_output(*args, **kwargs):
return subprocess_status_output(*args, **kwargs)[1]
def isHTTP2(domain):
curl_command = 'curl --http2 -v -k -H "Host: %(domain)s" ' \
'https://%(domain)s:%(https_port)s/ '\
'--resolve %(domain)s:%(https_port)s:%(ip)s' % dict(
ip=TEST_IP, domain=domain, https_port=HTTPS_PORT)
prc = subprocess.Popen(
curl_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = prc.communicate()
assert prc.returncode == 0, "Problem running %r. Output:\n%s\nError:\n%s" % (
curl_command, out, err)
return 'Using HTTP2, server supports' in err
class TestDataMixin(object):
def getTrimmedProcessInfo(self):
return '\n'.join(sorted([
'%(group)s:%(name)s %(statename)s' % q for q
in self.callSupervisorMethod('getAllProcessInfo')
if q['name'] != 'watchdog' and q['group'] != 'watchdog']))
def assertTestData(self, runtime_data, hash_value_dict=None, msg=None):
if hash_value_dict is None:
hash_value_dict = {}
filename = '%s-%s.txt' % (self.id().replace('zz_', ''), 'CADDY')
test_data_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'test_data', filename)
try:
test_data = open(test_data_file).read().strip()
except IOError:
test_data = ''
for hash_type, hash_value in hash_value_dict.items():
runtime_data = runtime_data.replace(hash_value, '{hash-%s}' % (
hash_type),)
maxDiff = self.maxDiff
self.maxDiff = None
longMessage = self.longMessage
self.longMessage = True
try:
self.assertMultiLineEqual(
test_data,
runtime_data,
msg=msg
)
except AssertionError:
if os.environ.get('SAVE_TEST_DATA', '0') == '1':
open(test_data_file, 'w').write(runtime_data.strip() + '\n')
raise
finally:
self.maxDiff = maxDiff
self.longMessage = longMessage
def _test_file_list(self, slave_dir_list, IGNORE_PATH_LIST=None):
if IGNORE_PATH_LIST is None:
IGNORE_PATH_LIST = []
runtime_data = []
for slave_var in glob.glob(os.path.join(self.instance_path, '*')):
for entry in os.walk(os.path.join(slave_var, *slave_dir_list)):
for filename in entry[2]:
path = os.path.join(
entry[0][len(self.instance_path) + 1:], filename)
if not any([path.endswith(q) for q in IGNORE_PATH_LIST]):
runtime_data.append(path)
runtime_data = '\n'.join(sorted(runtime_data))
self.assertTestData(runtime_data)
# convince the test to run last; it's a hack, but log files shall be checked
# after all other tests have had a chance to execute
def zz_test_file_list_log(self):
self._test_file_list(['var', 'log'], [
# no control at all when cron would kick in, ignore it
'cron.log',
# appears late and is quite unstable, no need to assert
'trafficserver/.diags.log.meta',
'trafficserver/.manager.log.meta',
'trafficserver/.squid.log.meta',
'trafficserver/diags.log',
'trafficserver/squid.log',
# not important, appears sometimes
'trafficserver/.error.log.meta',
'trafficserver/error.log',
'trafficserver/.traffic.out.meta',
'trafficserver/traffic.out',
])
def test_file_list_run(self):
self._test_file_list(['var', 'run'], [
# can't be sure regarding its presence
'caddy_configuration_last_state',
'validate_configuration_state_signature',
# run by cron from time to time
'monitor/monitor-collect.pid',
# no control regarding if it would or not be running
'monitor/monitor-bootstrap.pid',
])
def test_file_list_etc_cron_d(self):
self._test_file_list(['etc', 'cron.d'])
def test_file_list_plugin(self):
self._test_file_list(['etc', 'plugin'], ['.pyc'])
def test_supervisor_state(self):
# give a chance for etc/run scripts to finish
time.sleep(1)
hash_file_list = [os.path.join(
self.computer_partition_root_path, 'software_release/buildout.cfg')]
hash_value_dict = {
'generic': generateHashFromFiles(hash_file_list),
}
for caddy_wrapper_path in glob.glob(os.path.join(
self.instance_path, '*', 'bin', 'caddy-wrapper')):
partition_id = caddy_wrapper_path.split('/')[-3]
hash_value_dict[
'caddy-%s' % (partition_id)] = generateHashFromFiles(
[caddy_wrapper_path] + hash_file_list
)
for backend_haproxy_wrapper_path in glob.glob(os.path.join(
self.instance_path, '*', 'bin', 'backend-haproxy-wrapper')):
partition_id = backend_haproxy_wrapper_path.split('/')[-3]
hash_value_dict[
'backend-haproxy-%s' % (partition_id)] = generateHashFromFiles(
[backend_haproxy_wrapper_path] + hash_file_list
)
for rejected_slave_publish_path in glob.glob(os.path.join(
self.instance_path, '*', 'etc', 'nginx-rejected-slave.conf')):
partition_id = rejected_slave_publish_path.split('/')[-3]
rejected_slave_pem_path = os.path.join(
self.instance_path, partition_id, 'etc', 'rejected-slave.pem')
hash_value_dict[
'rejected-slave-publish'
] = generateHashFromFiles(
[rejected_slave_publish_path, rejected_slave_pem_path] + hash_file_list
)
runtime_data = self.getTrimmedProcessInfo()
self.assertTestData(runtime_data, hash_value_dict=hash_value_dict)
def fakeHTTPSResult(domain, path, port=HTTPS_PORT,
headers=None, cookies=None, source_ip=SOURCE_IP):
if headers is None:
headers = {}
# work around the requests library's problem of setting Accept-Encoding
# https://github.com/requests/requests/issues/2234
headers.setdefault('Accept-Encoding', 'dummy')
# Headers to trick the whole system, like a rogue user would do
headers.setdefault('X-Forwarded-For', '192.168.0.1')
headers.setdefault('X-Forwarded-Proto', 'irc')
headers.setdefault('X-Forwarded-Port', '17')
# Expose some Via to show how nicely it arrives at the backend
headers.setdefault('Via', 'http/1.1 clientvia')
session = requests.Session()
if source_ip is not None:
new_source = source.SourceAddressAdapter(source_ip)
session.mount('http://', new_source)
session.mount('https://', new_source)
socket_getaddrinfo = socket.getaddrinfo
try:
add_custom_dns(domain, port, TEST_IP)
socket.getaddrinfo = new_getaddrinfo
# Use a prepared request, to disable path normalization.
# We need this because some tests check requests with paths like
# /test-path/deep/.././deeper but we don't want the client to send
# /test-path/deeper
# See also https://github.com/psf/requests/issues/5289
url = 'https://%s:%s/%s' % (domain, port, path)
req = requests.Request(
method='GET',
url=url,
headers=headers,
cookies=cookies,
)
prepped = req.prepare()
prepped.url = url
return session.send(prepped, verify=False, allow_redirects=False)
finally:
socket.getaddrinfo = socket_getaddrinfo
def fakeHTTPResult(domain, path, port=HTTP_PORT,
headers=None, source_ip=SOURCE_IP):
if headers is None:
headers = {}
# work around the requests library's problem of setting Accept-Encoding
# https://github.com/requests/requests/issues/2234
headers.setdefault('Accept-Encoding', 'dummy')
# Headers to trick the whole system, like a rogue user would do
headers.setdefault('X-Forwarded-For', '192.168.0.1')
headers.setdefault('X-Forwarded-Proto', 'irc')
headers.setdefault('X-Forwarded-Port', '17')
# Expose some Via to show how nicely it arrives at the backend
headers.setdefault('Via', 'http/1.1 clientvia')
headers['Host'] = '%s:%s' % (domain, port)
session = requests.Session()
if source_ip is not None:
new_source = source.SourceAddressAdapter(source_ip)
session.mount('http://', new_source)
session.mount('https://', new_source)
# Use a prepared request, to disable path normalization.
url = 'http://%s:%s/%s' % (TEST_IP, port, path)
req = requests.Request(method='GET', url=url, headers=headers)
prepped = req.prepare()
prepped.url = url
return session.send(prepped, allow_redirects=False)
class TestHandler(BaseHTTPRequestHandler):
identification = None
configuration = {}
# override Server header response
server_version = "TestBackend"
sys_version = ""
def log_message(self, *args):
if os.environ.get('SLAPOS_TEST_DEBUG'):
return BaseHTTPRequestHandler.log_message(self, *args)
else:
return
def do_DELETE(self):
config = self.configuration.pop(self.path, None)
if config is None:
self.send_response(204)
self.end_headers()
else:
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps({self.path: config}, indent=2))
def do_PUT(self):
config = {
'status_code': self.headers.dict.get('x-reply-status-code', '200')
}
prefix = 'x-reply-header-'
length = len(prefix)
for key, value in self.headers.dict.items():
if key.startswith(prefix):
header = '-'.join([q.capitalize() for q in key[length:].split('-')])
config[header] = value.strip()
if 'x-reply-body' in self.headers.dict:
config['Body'] = base64.b64decode(self.headers.dict['x-reply-body'])
config['X-Drop-Header'] = self.headers.dict.get('x-drop-header')
self.configuration[self.path] = config
self.send_response(201)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps({self.path: config}, indent=2))
def do_POST(self):
return self.do_GET()
def do_GET(self):
config = self.configuration.get(self.path, None)
if config is not None:
config = config.copy()
response = config.pop('Body', None)
status_code = int(config.pop('status_code'))
timeout = int(config.pop('Timeout', '0'))
compress = int(config.pop('Compress', '0'))
drop_header_list = []
for header in (config.pop('X-Drop-Header') or '').split():
drop_header_list.append(header)
header_dict = config
else:
drop_header_list = []
for header in (self.headers.dict.get('x-drop-header') or '').split():
drop_header_list.append(header)
response = None
status_code = 200
timeout = int(self.headers.dict.get('timeout', '0'))
if 'x-maximum-timeout' in self.headers.dict:
maximum_timeout = int(self.headers.dict['x-maximum-timeout'])
timeout = random.randrange(maximum_timeout)
if 'x-response-size' in self.headers.dict:
min_response, max_response = [
int(q) for q in self.headers.dict['x-response-size'].split(' ')]
response_size = random.randrange(min_response, max_response)
response = ''.join(
random.choice(string.lowercase) for x in range(response_size))
compress = int(self.headers.dict.get('compress', '0'))
header_dict = {}
prefix = 'x-reply-header-'
length = len(prefix)
for key, value in self.headers.dict.items():
if key.startswith(prefix):
header = '-'.join([q.capitalize() for q in key[length:].split('-')])
header_dict[header] = value.strip()
if response is None:
if 'x-reply-body' not in self.headers.dict:
headers_dict = dict()
for header in self.headers.keys():
content = self.headers.getheaders(header)
if len(content) == 0:
headers_dict[header] = None
elif len(content) == 1:
headers_dict[header] = content[0]
else:
headers_dict[header] = content
response = {
'Path': self.path,
'Incoming Headers': headers_dict
}
response = json.dumps(response, indent=2)
else:
response = base64.b64decode(self.headers.dict['x-reply-body'])
time.sleep(timeout)
self.send_response(status_code)
for key, value in header_dict.items():
self.send_header(key, value)
if self.identification is not None:
self.send_header('X-Backend-Identification', self.identification)
if 'Content-Type' not in drop_header_list:
self.send_header("Content-Type", "application/json")
if 'Set-Cookie' not in drop_header_list:
self.send_header('Set-Cookie', 'secured=value;secure')
self.send_header('Set-Cookie', 'nonsecured=value')
if 'Via' not in drop_header_list:
self.send_header('Via', 'http/1.1 backendvia')
if compress:
self.send_header('Content-Encoding', 'gzip')
out = StringIO.StringIO()
# compress with level 0, to find out if something in the middle would
# like to alter the compression
with gzip.GzipFile(fileobj=out, mode="w", compresslevel=0) as f:
f.write(response)
response = out.getvalue()
self.send_header('Backend-Content-Length', len(response))
if 'Content-Length' not in drop_header_list:
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response)
class HttpFrontendTestCase(SlapOSInstanceTestCase):
# show full diffs, as it is required for proper analysis of problems
maxDiff = None
# minimise partition path
__partition_reference__ = 'T-'
@classmethod
def prepareCertificate(cls):
cls.another_server_ca = CertificateAuthority("Another Server Root CA")
cls.test_server_ca = CertificateAuthority("Test Server Root CA")
key, key_pem, csr, csr_pem = createCSR(
"testserver.example.com", cls._ipv4_address)
_, cls.test_server_certificate_pem = cls.test_server_ca.signCSR(csr)
cls.test_server_certificate_file = tempfile.NamedTemporaryFile(
delete=False
)
cls.test_server_certificate_file.write(
cls.test_server_certificate_pem + key_pem
)
cls.test_server_certificate_file.close()
@classmethod
def startServerProcess(cls):
server = ThreadedHTTPServer(
(cls._ipv4_address, cls._server_http_port),
TestHandler)
server_https = ThreadedHTTPServer(
(cls._ipv4_address, cls._server_https_port),
TestHandler)
server_https.socket = ssl.wrap_socket(
server_https.socket,
certfile=cls.test_server_certificate_file.name,
server_side=True)
cls.backend_url = 'http://%s:%s/' % server.server_address
server_process = multiprocessing.Process(
target=server.serve_forever, name='HTTPServer')
server_process.start()
cls.logger.debug('Started process %s' % (server_process,))
cls.backend_https_url = 'https://%s:%s/' % server_https.server_address
server_https_process = multiprocessing.Process(
target=server_https.serve_forever, name='HTTPSServer')
server_https_process.start()
cls.logger.debug('Started process %s' % (server_https_process,))
class NetlocHandler(TestHandler):
identification = 'netloc'
netloc_a_http = ThreadedHTTPServer(
(cls._ipv4_address, cls._server_netloc_a_http_port),
NetlocHandler)
netloc_a_http_process = multiprocessing.Process(
target=netloc_a_http.serve_forever, name='netloc-a-http')
netloc_a_http_process.start()
netloc_b_http = ThreadedHTTPServer(
(cls._ipv4_address, cls._server_netloc_b_http_port),
NetlocHandler)
netloc_b_http_process = multiprocessing.Process(
target=netloc_b_http.serve_forever, name='netloc-b-http')
netloc_b_http_process.start()
cls.server_process_list = [
server_process,
server_https_process,
netloc_a_http_process,
netloc_b_http_process,
]
@classmethod
def cleanUpCertificate(cls):
if getattr(cls, 'test_server_certificate_file', None) is not None:
os.unlink(cls.test_server_certificate_file.name)
@classmethod
def stopServerProcess(cls):
for process in cls.server_process_list:
if process is not None:
cls.logger.debug('Stopping process %s' % (process,))
process.join(10)
process.terminate()
time.sleep(0.1)
if process.is_alive():
cls.logger.warning(
'Process %s still alive' % (process, ))
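  # Start an HTTPS backend which requires a client certificate signed by
  # the backend-client caucase CA (cert_reqs=ssl.CERT_REQUIRED).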
def startAuthenticatedServerProcess(self):
master_parameter_dict = self.parseConnectionParameterDict()
caucase_url = master_parameter_dict['backend-client-caucase-url']
ca_certificate = requests.get(caucase_url + '/cas/crt/ca.crt.pem')
assert ca_certificate.status_code == httplib.OK
ca_certificate_file = os.path.join(
self.working_directory, 'ca-backend-client.crt.pem')
with open(ca_certificate_file, 'w') as fh:
fh.write(ca_certificate.text)
class OwnTestHandler(TestHandler):
identification = 'Auth Backend'
server_https_auth = ThreadedHTTPServer(
(self._ipv4_address, self._server_https_auth_port),
OwnTestHandler)
server_https_auth.socket = ssl.wrap_socket(
server_https_auth.socket,
certfile=self.test_server_certificate_file.name,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=ca_certificate_file,
server_side=True)
self.backend_https_auth_url = 'https://%s:%s/' \
% server_https_auth.server_address
self.server_https_auth_process = multiprocessing.Process(
target=server_https_auth.serve_forever, name='HTTPSServerAuth')
self.server_https_auth_process.start()
self.logger.debug('Started process %s' % (self.server_https_auth_process,))
def stopAuthenticatedServerProcess(self):
self.logger.debug('Stopping process %s' % (
self.server_https_auth_process,))
self.server_https_auth_process.join(10)
self.server_https_auth_process.terminate()
time.sleep(0.1)
if self.server_https_auth_process.is_alive():
self.logger.warning(
'Process %s still alive' % (self.server_https_auth_process, ))
@classmethod
def _fetchKedifaCaucaseCaCertificateFile(cls, parameter_dict):
ca_certificate = requests.get(
parameter_dict['kedifa-caucase-url'] + '/cas/crt/ca.crt.pem')
assert ca_certificate.status_code == httplib.OK
cls.kedifa_caucase_ca_certificate_file = os.path.join(
cls.working_directory, 'kedifa-caucase.ca.crt.pem')
open(cls.kedifa_caucase_ca_certificate_file, 'w').write(
ca_certificate.text)
@classmethod
def _fetchBackendClientCaCertificateFile(cls, parameter_dict):
ca_certificate = requests.get(
parameter_dict['backend-client-caucase-url'] + '/cas/crt/ca.crt.pem')
assert ca_certificate.status_code == httplib.OK
cls.backend_client_caucase_ca_certificate_file = os.path.join(
cls.working_directory, 'backend-client-caucase.ca.crt.pem')
open(cls.backend_client_caucase_ca_certificate_file, 'w').write(
ca_certificate.text)
@classmethod
def setUpMaster(cls):
    # run partition until AIKC (automatic-internal-kedifa-caucase-csr
    # handling) finishes
cls.runComputerPartitionUntil(
cls.untilNotReadyYetNotInMasterKeyGenerateAuthUrl)
parameter_dict = cls.requestDefaultInstance().getConnectionParameterDict()
cls._fetchKedifaCaucaseCaCertificateFile(parameter_dict)
auth = requests.get(
parameter_dict['master-key-generate-auth-url'],
verify=cls.kedifa_caucase_ca_certificate_file)
assert auth.status_code == httplib.CREATED
upload = requests.put(
parameter_dict['master-key-upload-url'] + auth.text,
data=cls.key_pem + cls.certificate_pem,
verify=cls.kedifa_caucase_ca_certificate_file)
assert upload.status_code == httplib.CREATED
cls.runKedifaUpdater()
@classmethod
def runKedifaUpdater(cls):
kedifa_updater = None
for kedifa_updater in sorted(glob.glob(
os.path.join(
cls.instance_path, '*', 'etc', 'service', 'kedifa-updater*'))):
      # fetch the first kedifa-updater, as by default most of the tests use
      # only one running partition; if a test does not need kedifa-updater,
      # this method can be overridden
break
if kedifa_updater is not None:
      # retry kedifa_updater a few times
for i in range(10):
return_code, output = subprocess_status_output(
[kedifa_updater, '--once'])
if return_code == 0:
break
# wait for the other updater to work
time.sleep(2)
      # assert that, in the worst case, the last run was correct
assert return_code == 0, output
      # give caddy a moment to refresh its config, as sending the signal
      # does not block until caddy has reloaded
time.sleep(2)
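  # Create a self-signed wildcard certificate covering the domains used
  # throughout the tests.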
@classmethod
def createWildcardExampleComCertificate(cls):
_, cls.key_pem, _, cls.certificate_pem = createSelfSignedCertificate(
[
'*.customdomain.example.com',
'*.example.com',
'*.alias1.example.com',
])
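  # Re-run the computer partition until the `until` condition returns
  # True, with at most max_try attempts.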
@classmethod
def runComputerPartitionUntil(cls, until):
max_try = 10
try_num = 1
while True:
if until():
break
if try_num > max_try:
raise ValueError('Failed to run computer partition with %r' % (until,))
try:
cls.slap.waitForInstance()
except Exception:
cls.logger.exception("Error during until run")
try_num += 1
@classmethod
def untilNotReadyYetNotInMasterKeyGenerateAuthUrl(cls):
parameter_dict = cls.requestDefaultInstance().getConnectionParameterDict()
key = 'master-key-generate-auth-url'
if key not in parameter_dict:
return False
if 'NotReadyYet' in parameter_dict[key]:
return False
return True
@classmethod
def callSupervisorMethod(cls, method, *args, **kwargs):
with cls.slap.instance_supervisor_rpc as instance_supervisor:
return getattr(instance_supervisor, method)(*args, **kwargs)
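  # Assert that the rejected-slave promise reports no rejected slaves;
  # pops the promise URL from the parameter dict.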
def assertRejectedSlavePromiseEmptyWithPop(self, parameter_dict):
rejected_slave_promise_url = parameter_dict.pop(
'rejected-slave-promise-url')
try:
result = requests.get(rejected_slave_promise_url, verify=False)
if result.text == '':
result_json = {}
else:
result_json = result.json()
self.assertEqual(
{},
result_json
)
except AssertionError:
raise
except Exception as e:
self.fail(e)
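  # Pop and normalise the response headers: checks the backend Server
  # header and the expected Via chain (with or without the cache hop),
  # then returns the remaining headers for further assertions.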
def assertResponseHeaders(
self, result, cached=False, via=True, backend_reached=True):
headers = result.headers.copy()
self.assertKeyWithPop('Date', headers)
    # drop headers which vary between responses
headers.pop('Connection', None)
headers.pop('Content-Length', None)
headers.pop('Keep-Alive', None)
headers.pop('Transfer-Encoding', None)
if backend_reached:
self.assertEqual('TestBackend', headers.pop('Server', ''))
via_id = '%s-%s' % (
self.node_information_dict['node-id'],
self.node_information_dict['version-hash-history'].keys()[0])
if via:
self.assertIn('Via', headers)
if cached:
self.assertEqual(
'http/1.1 backendvia, '
'HTTP/1.1 rapid-cdn-backend-%(via_id)s, '
'http/1.0 rapid-cdn-cache-%(via_id)s, '
'HTTP/1.1 rapid-cdn-frontend-%(via_id)s' % dict(via_id=via_id),
headers.pop('Via')
)
else:
self.assertEqual(
'http/1.1 backendvia, '
'HTTP/1.1 rapid-cdn-backend-%(via_id)s, '
'HTTP/1.1 rapid-cdn-frontend-%(via_id)s' % dict(via_id=via_id),
headers.pop('Via')
)
else:
self.assertNotIn('Via', headers)
return headers
def assertLogAccessUrlWithPop(self, parameter_dict):
log_access_url = parameter_dict.pop('log-access-url')
self.assertTrue(len(log_access_url) >= 1)
    # check only the first one, as the second frontend will be stopped
log_access = log_access_url[0]
entry = log_access.split(': ')
if len(entry) != 2:
self.fail('Cannot parse %r' % (log_access,))
frontend, url = entry
result = requests.get(url, verify=False)
self.assertEqual(
httplib.OK,
result.status_code,
'While accessing %r of %r the status code was %r' % (
url, frontend, result.status_code))
    # check that the result is correct JSON, which gives access to
    # information about all logs
self.assertEqual(
'application/json',
result.headers['Content-Type']
)
self.assertEqual(
sorted([q['name'] for q in result.json()]),
['access.log', 'backend.log', 'error.log'])
self.assertEqual(
httplib.OK,
requests.get(url + 'access.log', verify=False).status_code
)
self.assertEqual(
httplib.OK,
requests.get(url + 'error.log', verify=False).status_code
)
    # assert only for a few tests, as the backend log is not available for
    # many of them (it is created on the fly)
for test_name in [
'test_url', 'test_auth_to_backend', 'test_compressed_result']:
if self.id().endswith(test_name):
self.assertEqual(
httplib.OK,
requests.get(url + 'backend.log', verify=False).status_code
)
def assertKedifaKeysWithPop(self, parameter_dict, prefix=''):
generate_auth_url = parameter_dict.pop('%skey-generate-auth-url' % (
prefix,))
upload_url = parameter_dict.pop('%skey-upload-url' % (prefix,))
kedifa_ipv6_base = 'https://[%s]:%s' % (self._ipv6_address, KEDIFA_PORT)
base = '^' + kedifa_ipv6_base.replace(
'[', r'\[').replace(']', r'\]') + '/.{32}'
self.assertRegexpMatches(
generate_auth_url,
base + r'\/generateauth$'
)
self.assertRegexpMatches(
upload_url,
base + r'\?auth=$'
)
kedifa_caucase_url = parameter_dict.pop('kedifa-caucase-url')
self.assertEqual(
kedifa_caucase_url,
'http://[%s]:%s' % (self._ipv6_address, CAUCASE_PORT),
)
return generate_auth_url, upload_url
def assertNodeInformationWithPop(self, parameter_dict):
key = 'caddy-frontend-1-node-information-json'
node_information_json_dict = {}
for k in parameter_dict.keys():
if k.startswith('caddy-frontend') and k.endswith(
'node-information-json'):
node_information_json_dict[k] = parameter_dict.pop(k)
self.assertEqual(
[key],
node_information_json_dict.keys()
)
node_information_dict = json.loads(node_information_json_dict[key])
self.assertIn("node-id", node_information_dict)
self.assertIn("version-hash-history", node_information_dict)
self.node_information_dict = node_information_dict
def assertBackendHaproxyStatisticUrl(self, parameter_dict):
url_key = 'caddy-frontend-1-backend-haproxy-statistic-url'
backend_haproxy_statistic_url_dict = {}
for key in parameter_dict.keys():
if key.startswith('caddy-frontend') and key.endswith(
'backend-haproxy-statistic-url'):
backend_haproxy_statistic_url_dict[key] = parameter_dict.pop(key)
self.assertEqual(
[url_key],
backend_haproxy_statistic_url_dict.keys()
)
backend_haproxy_statistic_url = backend_haproxy_statistic_url_dict[url_key]
result = requests.get(
backend_haproxy_statistic_url,
verify=False,
)
self.assertEqual(httplib.OK, result.status_code)
self.assertIn('testing partition 0', result.text)
self.assertIn('Statistics Report for HAProxy', result.text)
def assertKeyWithPop(self, key, d):
self.assertTrue(key in d, 'Key %r is missing in %r' % (key, d))
d.pop(key)
def assertEqualResultJson(self, result, key, value):
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertTrue(key in j, 'No key %r in %s' % (key, j))
self.assertEqual(value, j[key])
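  # Monkey-patch requests/urllib3 so that each response carries the peer
  # certificate (peercert), which the der2pem assertions rely on.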
def patchRequests(self):
HTTPResponse = requests.packages.urllib3.response.HTTPResponse
HTTPResponse.orig__init__ = HTTPResponse.__init__
def new_HTTPResponse__init__(self, *args, **kwargs):
self.orig__init__(*args, **kwargs)
try:
self.peercert = self._connection.sock.getpeercert(binary_form=True)
except AttributeError:
pass
HTTPResponse.__init__ = new_HTTPResponse__init__
HTTPAdapter = requests.adapters.HTTPAdapter
HTTPAdapter.orig_build_response = HTTPAdapter.build_response
def new_HTTPAdapter_build_response(self, request, resp):
response = self.orig_build_response(request, resp)
try:
response.peercert = resp.peercert
except AttributeError:
pass
return response
HTTPAdapter.build_response = new_HTTPAdapter_build_response
def unpatchRequests(self):
HTTPResponse = requests.packages.urllib3.response.HTTPResponse
if getattr(HTTPResponse, 'orig__init__', None) is not None:
HTTPResponse.__init__ = HTTPResponse.orig__init__
del(HTTPResponse.orig__init__)
HTTPAdapter = requests.adapters.HTTPAdapter
if getattr(HTTPAdapter, 'orig_build_response', None) is not None:
HTTPAdapter.build_response = HTTPAdapter.orig_build_response
del(HTTPAdapter.orig_build_response)
def setUp(self):
    # patch requests in order to be able to extract SSL certificates
self.patchRequests()
def tearDown(self):
self.unpatchRequests()
super(HttpFrontendTestCase, self).tearDown()
def parseParameterDict(self, parameter_dict):
parsed_parameter_dict = {}
for key, value in parameter_dict.items():
if key in [
'rejected-slave-dict',
'warning-slave-dict',
'warning-list',
'request-error-list',
'log-access-url']:
value = json.loads(value)
parsed_parameter_dict[key] = value
return parsed_parameter_dict
def getMasterPartitionPath(self):
# partition with etc/nginx-rejected-slave.conf
return [
q for q in glob.glob(os.path.join(self.instance_path, '*',))
if os.path.exists(
os.path.join(q, 'etc', 'nginx-rejected-slave.conf'))][0]
def parseConnectionParameterDict(self):
return self.parseParameterDict(
self.requestDefaultInstance().getConnectionParameterDict()
)
@classmethod
def waitForMethod(cls, name, method):
wait_time = 600
begin = time.time()
try_num = 0
cls.logger.debug('%s for %is' % (name, wait_time,))
while True:
try:
try_num += 1
method()
except Exception:
if time.time() - begin > wait_time:
cls.logger.exception(
"Error during %s after %.2fs" % (name, (time.time() - begin),))
raise
else:
time.sleep(0.5)
else:
cls.logger.info("%s took %.2fs" % (name, (time.time() - begin),))
break
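  # Wait until the frontend itself answers HTTPS requests.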
@classmethod
def waitForCaddy(cls):
def method():
fakeHTTPSResult(
cls._ipv4_address,
'/',
)
cls.waitForMethod('waitForCaddy', method)
@classmethod
def _cleanup(cls, snapshot_name):
cls.cleanUpCertificate()
cls.stopServerProcess()
super(HttpFrontendTestCase, cls)._cleanup(snapshot_name)
@classmethod
def _workingDirectorySetUp(cls):
    # set up the working directory
cls.working_directory = os.path.join(os.path.realpath(
os.environ.get(
'SLAPOS_TEST_WORKING_DIR',
os.path.join(os.getcwd(), '.slapos'))),
'caddy-frontend-test')
if not os.path.isdir(cls.working_directory):
os.mkdir(cls.working_directory)
@classmethod
def setUpClass(cls):
try:
cls.createWildcardExampleComCertificate()
cls.prepareCertificate()
      # find ports once, to be able to call startServerProcess many times
cls._server_http_port = findFreeTCPPort(cls._ipv4_address)
cls._server_https_port = findFreeTCPPort(cls._ipv4_address)
cls._server_https_auth_port = findFreeTCPPort(cls._ipv4_address)
cls._server_netloc_a_http_port = findFreeTCPPort(cls._ipv4_address)
cls._server_netloc_b_http_port = findFreeTCPPort(cls._ipv4_address)
cls.startServerProcess()
except BaseException:
cls.logger.exception("Error during setUpClass")
cls._cleanup("{}.{}.setUpClass".format(cls.__module__, cls.__name__))
cls.setUp = lambda self: self.fail('Setup Class failed.')
raise
super(HttpFrontendTestCase, cls).setUpClass()
try:
cls._workingDirectorySetUp()
# expose instance directory
cls.instance_path = cls.slap.instance_directory
# expose software directory, extract from found computer partition
cls.software_path = os.path.realpath(os.path.join(
cls.computer_partition_root_path, 'software_release'))
cls.setUpMaster()
cls.waitForCaddy()
except BaseException:
cls.logger.exception("Error during setUpClass")
# "{}.{}.setUpClass".format(cls.__module__, cls.__name__) is already used
# by SlapOSInstanceTestCase.setUpClass so we use another name for
# snapshot, to make sure we don't store another snapshot in same
# directory.
cls._cleanup("{}.SlaveHttpFrontendTestCase.{}.setUpClass".format(
cls.__module__, cls.__name__))
cls.setUp = lambda self: self.fail('Setup Class failed.')
raise
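# Base test case for tests which use shared ("slave") instances requested
# on top of the master frontend instance.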
class SlaveHttpFrontendTestCase(HttpFrontendTestCase):
def _get_backend_haproxy_configuration(self):
backend_configuration_file = glob.glob(os.path.join(
self.instance_path, '*', 'etc', 'backend-haproxy.cfg'))[0]
with open(backend_configuration_file) as fh:
return fh.read()
@classmethod
def requestDefaultInstance(cls, state='started'):
default_instance = super(
SlaveHttpFrontendTestCase, cls).requestDefaultInstance(state=state)
if state != 'destroyed':
cls.requestSlaves()
return default_instance
@classmethod
def requestSlaveInstance(cls, partition_reference, partition_parameter_kw):
software_url = cls.getSoftwareURL()
software_type = cls.getInstanceSoftwareType()
cls.logger.debug(
'requesting slave "%s" type: %r software:%s parameters:%s',
partition_reference, software_type, software_url, partition_parameter_kw)
return cls.slap.request(
software_release=software_url,
software_type=software_type,
partition_reference=partition_reference,
partition_parameter_kw=partition_parameter_kw,
shared=True
)
@classmethod
def requestSlaves(cls):
for slave_reference, partition_parameter_kw in cls\
.getSlaveParameterDictDict().items():
software_url = cls.getSoftwareURL()
software_type = cls.getInstanceSoftwareType()
cls.logger.debug(
'requesting slave "%s" type: %r software:%s parameters:%s',
slave_reference, software_type, software_url, partition_parameter_kw)
cls.requestSlaveInstance(
partition_reference=slave_reference,
partition_parameter_kw=partition_parameter_kw,
)
@classmethod
def setUpClass(cls):
super(SlaveHttpFrontendTestCase, cls).setUpClass()
try:
cls.setUpSlaves()
cls.waitForSlave()
except BaseException:
cls.logger.exception("Error during setUpClass")
# "{}.{}.setUpClass".format(cls.__module__, cls.__name__) is already used
# by SlapOSInstanceTestCase.setUpClass so we use another name for
# snapshot, to make sure we don't store another snapshot in same
# directory.
cls._cleanup("{}.SlaveHttpFrontendTestCase.{}.setUpClass".format(
cls.__module__, cls.__name__))
cls.setUp = lambda self: self.fail('Setup Class failed.')
raise
@classmethod
def waitForSlave(cls):
def method():
for parameter_dict in cls.getSlaveConnectionParameterDictList():
if 'domain' in parameter_dict:
try:
fakeHTTPSResult(
parameter_dict['domain'], '/')
except requests.exceptions.InvalidURL:
# ignore slaves to which connection is impossible by default
continue
cls.waitForMethod('waitForSlave', method)
@classmethod
def getSlaveConnectionParameterDictList(cls):
parameter_dict_list = []
for slave_reference, partition_parameter_kw in cls\
.getSlaveParameterDictDict().items():
parameter_dict_list.append(cls.requestSlaveInstance(
partition_reference=slave_reference,
partition_parameter_kw=partition_parameter_kw,
).getConnectionParameterDict())
return parameter_dict_list
@classmethod
def untilSlavePartitionReady(cls):
    # no on-watch service shall be in EXITED state
for process in cls.callSupervisorMethod('getAllProcessInfo'):
if process['name'].endswith('-on-watch') and \
process['statename'] == 'EXITED':
if process['name'].startswith('monitor-http'):
continue
return False
for parameter_dict in cls.getSlaveConnectionParameterDictList():
log_access_ready = 'log-access-url' in parameter_dict
key = 'key-generate-auth-url'
key_generate_auth_ready = key in parameter_dict \
and 'NotReadyYet' not in parameter_dict[key]
if not(log_access_ready and key_generate_auth_ready):
return False
return True
@classmethod
def setUpSlaves(cls):
cls.runComputerPartitionUntil(
cls.untilSlavePartitionReady)
cls.updateSlaveConnectionParameterDictDict()
@classmethod
def updateSlaveConnectionParameterDictDict(cls):
cls.slave_connection_parameter_dict_dict = {}
    # request each slave again and store its connection parameters
for slave_reference, partition_parameter_kw in cls\
.getSlaveParameterDictDict().items():
slave_instance = cls.requestSlaveInstance(
partition_reference=slave_reference,
partition_parameter_kw=partition_parameter_kw,
)
cls.slave_connection_parameter_dict_dict[slave_reference] = \
slave_instance.getConnectionParameterDict()
def parseSlaveParameterDict(self, key):
return self.parseParameterDict(
self.slave_connection_parameter_dict_dict[
key
]
)
def assertSlaveBase(
self, reference, expected_parameter_dict=None, hostname=None):
if expected_parameter_dict is None:
expected_parameter_dict = {}
parameter_dict = self.parseSlaveParameterDict(reference)
self.assertLogAccessUrlWithPop(parameter_dict)
self.current_generate_auth, self.current_upload_url = \
self.assertKedifaKeysWithPop(parameter_dict, '')
self.assertNodeInformationWithPop(parameter_dict)
if hostname is None:
hostname = reference.translate(None, '_-').lower()
expected_parameter_dict.update(**{
'domain': '%s.example.com' % (hostname,),
'replication_number': '1',
'url': 'http://%s.example.com' % (hostname, ),
'site_url': 'http://%s.example.com' % (hostname, ),
'secure_access': 'https://%s.example.com' % (hostname, ),
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
})
self.assertEqual(
expected_parameter_dict,
parameter_dict
)
return parameter_dict
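  # Assert that the last line of the given httpd log file matches
  # log_regexp.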
def assertLastLogLineRegexp(self, log_name, log_regexp):
log_file = glob.glob(
os.path.join(
self.instance_path, '*', 'var', 'log', 'httpd', log_name
))[0]
self.assertRegexpMatches(
open(log_file, 'r').readlines()[-1],
log_regexp)
class TestMasterRequestDomain(HttpFrontendTestCase, TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
def test(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertRejectedSlavePromiseEmptyWithPop(parameter_dict)
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'example.com',
'accepted-slave-amount': '0',
'rejected-slave-amount': '0',
'slave-amount': '0',
'rejected-slave-dict': {}
},
parameter_dict
)
class TestMasterRequest(HttpFrontendTestCase, TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
def test(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertRejectedSlavePromiseEmptyWithPop(parameter_dict)
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'None',
'accepted-slave-amount': '0',
'rejected-slave-amount': '0',
'slave-amount': '0',
'rejected-slave-dict': {}},
parameter_dict
)
class TestMasterAIKCDisabledAIBCCDisabledRequest(
HttpFrontendTestCase, TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'automatic-internal-kedifa-caucase-csr': 'false',
'automatic-internal-backend-client-caucase-csr': 'false',
}
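  # With both automatic caucase CSR handlings disabled the cluster cannot
  # stabilize on its own: expect the first instantiation to fail, then
  # simulate the human operator signing the CSRs and finish processing.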
@classmethod
def _setUpClass(cls):
instance_max_retry = cls.instance_max_retry
try:
cls.instance_max_retry = 3
super(TestMasterAIKCDisabledAIBCCDisabledRequest, cls)._setUpClass()
except SlapOSNodeInstanceError: # Note: SLAPOS_TEST_DEBUG=1 will interrupt
pass
else:
raise ValueError('_setUpClass unexpected success')
    # A cluster requested without automatic certificate handling will never
    # stabilize, as nodes cannot join the cluster, so the user is required
    # to first manually create a key and certificate for themselves, then
    # manually create certificates for the services
cls._workingDirectorySetUp()
_, kedifa_key_pem, _, kedifa_csr_pem = createCSR('Kedifa User')
_, backend_client_key_pem, _, backend_client_csr_pem = createCSR(
'Backend Client User')
parameter_dict = cls.requestDefaultInstance(
).getConnectionParameterDict()
cls._fetchKedifaCaucaseCaCertificateFile(parameter_dict)
cls._fetchBackendClientCaCertificateFile(parameter_dict)
with open(cls.kedifa_caucase_ca_certificate_file) as fh:
kedifa_ca_pem = fh.read()
with open(cls.backend_client_caucase_ca_certificate_file) as fh:
backend_client_ca_pem = fh.read()
kedifa_caucase_url = parameter_dict['kedifa-caucase-url']
backend_client_caucase_url = parameter_dict['backend-client-caucase-url']
# Simulate human: create user keys
def getCauCertificate(ca_url, ca_pem, csr_pem):
cau_client = caucase.client.CaucaseClient(
ca_url=ca_url + '/cau',
ca_crt_pem_list=caucase.utils.getCertList(ca_pem),
)
csr_id = cau_client.createCertificateSigningRequest(csr_pem)
return cau_client.getCertificate(csr_id)
kedifa_crt_pem = getCauCertificate(
kedifa_caucase_url, kedifa_ca_pem, kedifa_csr_pem)
backend_client_crt_pem = getCauCertificate(
backend_client_caucase_url, backend_client_ca_pem,
backend_client_csr_pem)
kedifa_key_file = os.path.join(cls.working_directory, 'kedifa-key.pem')
with open(kedifa_key_file, 'w') as fh:
fh.write(kedifa_crt_pem + kedifa_key_pem)
backend_client_key_file = os.path.join(
cls.working_directory, 'backend-client-key.pem')
with open(backend_client_key_file, 'w') as fh:
fh.write(backend_client_crt_pem + backend_client_key_pem)
# Simulate human: create service keys
def signAllCasCsr(ca_url, ca_pem, user_key, pending_csr_amount):
client = caucase.client.CaucaseClient(
ca_url=ca_url + '/cas',
ca_crt_pem_list=caucase.utils.getCertList(ca_pem), user_key=user_key)
pending_csr_list = client.getPendingCertificateRequestList()
assert len(pending_csr_list) == pending_csr_amount
for csr_entry in pending_csr_list:
client.createCertificate(int(csr_entry['id']))
signAllCasCsr(kedifa_caucase_url, kedifa_ca_pem, kedifa_key_file, 2)
signAllCasCsr(
backend_client_caucase_url, backend_client_ca_pem,
backend_client_key_file, 1)
    # Continue instance processing; copied from
    # slapos.testing.testcase.SlapOSInstanceTestCase._setUpClass,
    # as we hack a lot
cls.instance_max_retry = instance_max_retry
cls.waitForInstance()
cls.computer_partition = cls.requestDefaultInstance()
cls.computer_partition_root_path = os.path.join(
cls.slap._instance_root, cls.computer_partition.getId())
def test(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertRejectedSlavePromiseEmptyWithPop(parameter_dict)
self.assertKeyWithPop('kedifa-csr-certificate', parameter_dict)
self.assertKeyWithPop('kedifa-csr-url', parameter_dict)
self.assertKeyWithPop('caddy-frontend-1-kedifa-csr-url', parameter_dict)
self.assertKeyWithPop(
'caddy-frontend-1-backend-client-csr-url', parameter_dict)
self.assertKeyWithPop(
'caddy-frontend-1-csr-certificate', parameter_dict)
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'None',
'accepted-slave-amount': '0',
'rejected-slave-amount': '0',
'slave-amount': '0',
'rejected-slave-dict': {}},
parameter_dict
)
class TestSlave(SlaveHttpFrontendTestCase, TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'mpm-graceful-shutdown-timeout': 2,
'request-timeout': '12',
}
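  # Extend certificate preparation with a CA-signed and a self-signed
  # certificate for the custom-domain slaves.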
@classmethod
def prepareCertificate(cls):
cls.ca = CertificateAuthority('TestSlave')
_, cls.customdomain_ca_key_pem, csr, _ = createCSR(
'customdomainsslcrtsslkeysslcacrt.example.com')
_, cls.customdomain_ca_certificate_pem = cls.ca.signCSR(csr)
_, cls.customdomain_key_pem, _, cls.customdomain_certificate_pem = \
createSelfSignedCertificate(['customdomainsslcrtsslkey.example.com'])
super(TestSlave, cls).prepareCertificate()
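  # One entry per slave scenario under test; the key is the slave
  # reference and is used to derive the slave's domain name.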
@classmethod
def getSlaveParameterDictDict(cls):
return {
'empty': {
},
'bad-backend': {
'url': 'http://bad.backend/',
},
'Url': {
# make URL "incorrect", with whitespace, nevertheless it shall be
# correctly handled
'url': ' ' + cls.backend_url + '/?a=b&c=' + ' ',
        # authenticating to an http backend shall be a no-op
'authenticate-to-backend': True,
},
'url-netloc-list': {
'url': cls.backend_url,
'url-netloc-list': '%(ip)s:%(port_a)s %(ip)s:%(port_b)s' % {
'ip': cls._ipv4_address,
'port_a': cls._server_netloc_a_http_port,
'port_b': cls._server_netloc_b_http_port},
},
'auth-to-backend': {
        # use the port reserved for the backend, which is going to be
        # started later
'url': 'https://%s:%s/' % (
cls._ipv4_address, cls._server_https_auth_port),
'authenticate-to-backend': True,
},
'auth-to-backend-not-configured': {
        # use the port reserved for the backend, which is going to be
        # started later
'url': 'https://%s:%s/' % (
cls._ipv4_address, cls._server_https_auth_port),
},
'auth-to-backend-backend-ignore': {
'url': cls.backend_https_url,
'authenticate-to-backend': True,
},
'url_https-url': {
'url': cls.backend_url + 'http',
'https-url': cls.backend_url + 'https',
'backend-connect-timeout': 10,
'backend-connect-retries': 5,
'request-timeout': 15,
'strict-transport-security': '200',
'strict-transport-security-sub-domains': True,
'strict-transport-security-preload': True,
},
'https-url-netloc-list': {
'url': cls.backend_url + 'http',
'https-url': cls.backend_url + 'https',
'https-url-netloc-list': '%(ip)s:%(port_a)s %(ip)s:%(port_b)s' % {
'ip': cls._ipv4_address,
'port_a': cls._server_netloc_a_http_port,
'port_b': cls._server_netloc_b_http_port},
},
'server-alias': {
'url': cls.backend_url,
'server-alias': 'alias1.example.com alias2.example.com',
'strict-transport-security': '200',
},
'server-alias-empty': {
'url': cls.backend_url,
'server-alias': '',
'strict-transport-security': '200',
'strict-transport-security-sub-domains': True,
},
'server-alias-wildcard': {
'url': cls.backend_url,
'server-alias': '*.alias1.example.com',
'strict-transport-security': '200',
'strict-transport-security-preload': True,
},
'server-alias-duplicated': {
'url': cls.backend_url,
'server-alias': 'alias3.example.com alias3.example.com',
},
'server-alias_custom_domain-duplicated': {
'url': cls.backend_url,
'custom_domain': 'alias4.example.com',
'server-alias': 'alias4.example.com alias4.example.com',
},
'ssl-proxy-verify_ssl_proxy_ca_crt': {
'url': cls.backend_https_url,
'ssl-proxy-verify': True,
'ssl_proxy_ca_crt': cls.test_server_ca.certificate_pem,
},
'ssl-proxy-verify_ssl_proxy_ca_crt-unverified': {
'url': cls.backend_https_url,
'ssl-proxy-verify': True,
'ssl_proxy_ca_crt': cls.another_server_ca.certificate_pem,
},
'ssl-proxy-verify-unverified': {
'url': cls.backend_https_url,
'ssl-proxy-verify': True,
},
'https-only': {
'url': cls.backend_url,
'https-only': False,
},
'custom_domain': {
'url': cls.backend_url,
'custom_domain': 'mycustomdomain.example.com',
},
'custom_domain_wildcard': {
'url': cls.backend_url,
'custom_domain': '*.customdomain.example.com',
},
'custom_domain_server_alias': {
'url': cls.backend_url,
'custom_domain': 'mycustomdomainserveralias.example.com',
'server-alias': 'mycustomdomainserveralias1.example.com',
},
'custom_domain_ssl_crt_ssl_key': {
'url': cls.backend_url,
'custom_domain': 'customdomainsslcrtsslkey.example.com',
},
'custom_domain_ssl_crt_ssl_key_ssl_ca_crt': {
'url': cls.backend_url,
'custom_domain': 'customdomainsslcrtsslkeysslcacrt.example.com',
},
'ssl_ca_crt_only': {
'url': cls.backend_url,
},
'ssl_ca_crt_garbage': {
'url': cls.backend_url,
},
'ssl_ca_crt_does_not_match': {
'url': cls.backend_url,
},
'type-zope': {
'url': cls.backend_url,
'type': 'zope',
},
'type-zope-prefer-gzip-encoding-to-backend': {
'url': cls.backend_url,
'prefer-gzip-encoding-to-backend': 'true',
'type': 'zope',
},
'type-zope-prefer-gzip-encoding-to-backend-https-only': {
'url': cls.backend_url,
'prefer-gzip-encoding-to-backend': 'true',
'type': 'zope',
'https-only': 'false',
},
'type-zope-virtualhostroot-http-port': {
'url': cls.backend_url,
'type': 'zope',
'virtualhostroot-http-port': '12345',
'https-only': 'false',
},
'type-zope-virtualhostroot-https-port': {
'url': cls.backend_url,
'type': 'zope',
'virtualhostroot-https-port': '12345'
},
'type-zope-path': {
'url': cls.backend_url,
'type': 'zope',
'path': '///path/to/some/resource///',
},
'type-zope-default-path': {
'url': cls.backend_url,
'type': 'zope',
'default-path': '///default-path/to/some/resource///',
},
'type-notebook': {
'url': cls.backend_url,
'type': 'notebook',
},
'type-websocket': {
'url': cls.backend_url,
'type': 'websocket',
},
'type-websocket-websocket-path-list': {
'url': cls.backend_url,
'type': 'websocket',
'websocket-path-list': '////ws//// /with%20space/',
},
'type-websocket-websocket-transparent-false': {
'url': cls.backend_url,
'type': 'websocket',
'websocket-transparent': 'false',
},
'type-websocket-websocket-path-list-websocket-transparent-false': {
'url': cls.backend_url,
'type': 'websocket',
'websocket-path-list': '////ws//// /with%20space/',
'websocket-transparent': 'false',
},
'type-redirect': {
'url': cls.backend_url,
'type': 'redirect',
},
'type-redirect-custom_domain': {
'url': cls.backend_url,
'type': 'redirect',
'custom_domain': 'customdomaintyperedirect.example.com',
},
'enable_cache': {
'url': cls.backend_url,
'enable_cache': True,
},
'enable_cache_custom_domain': {
'url': cls.backend_url,
'enable_cache': True,
'custom_domain': 'customdomainenablecache.example.com',
},
'enable_cache_server_alias': {
'url': cls.backend_url,
'enable_cache': True,
'server-alias': 'enablecacheserveralias1.example.com',
},
'enable_cache-disable-no-cache-request': {
'url': cls.backend_url,
'enable_cache': True,
'disable-no-cache-request': True,
},
'enable_cache-disable-via-header': {
'url': cls.backend_url,
'enable_cache': True,
'disable-via-header': True,
},
'enable_cache-https-only-false': {
'url': cls.backend_url,
'https-only': False,
'enable_cache': True,
},
'enable-http2-false': {
'url': cls.backend_url,
'enable-http2': False,
},
'enable-http2-default': {
'url': cls.backend_url,
},
'prefer-gzip-encoding-to-backend': {
'url': cls.backend_url,
'prefer-gzip-encoding-to-backend': 'true',
},
'prefer-gzip-encoding-to-backend-https-only': {
'url': cls.backend_url,
'prefer-gzip-encoding-to-backend': 'true',
'https-only': 'false',
},
'disabled-cookie-list': {
'url': cls.backend_url,
'disabled-cookie-list': 'Chocolate Vanilia',
},
'monitor-ipv4-test': {
'monitor-ipv4-test': 'monitor-ipv4-test',
},
'monitor-ipv6-test': {
'monitor-ipv6-test': 'monitor-ipv6-test',
},
'ciphers': {
'ciphers': 'RSA-3DES-EDE-CBC-SHA RSA-AES128-CBC-SHA',
}
}
monitor_setup_url_key = 'monitor-setup-url'
def test_monitor_setup(self):
IP = self._ipv6_address
self.monitor_configuration_list = [
{
'htmlUrl': 'https://[%s]:8401/public/feed' % (IP,),
'text': 'testing partition 0',
'title': 'testing partition 0',
'type': 'rss',
'url': 'https://[%s]:8401/share/private/' % (IP,),
'version': 'RSS',
'xmlUrl': 'https://[%s]:8401/public/feed' % (IP,),
},
{
'htmlUrl': 'https://[%s]:8402/public/feed' % (IP,),
'text': 'kedifa',
'title': 'kedifa',
'type': 'rss',
'url': 'https://[%s]:8402/share/private/' % (IP,),
'version': 'RSS',
'xmlUrl': 'https://[%s]:8402/public/feed' % (IP,),
},
{
'htmlUrl': 'https://[%s]:8411/public/feed' % (IP,),
'text': 'caddy-frontend-1',
'title': 'caddy-frontend-1',
'type': 'rss',
'url': 'https://[%s]:8411/share/private/' % (IP,),
'version': 'RSS',
'xmlUrl': 'https://[%s]:8411/public/feed' % (IP,),
},
]
connection_parameter_dict = self\
.computer_partition.getConnectionParameterDict()
self.assertTrue(
self.monitor_setup_url_key in connection_parameter_dict,
'%s not in %s' % (self.monitor_setup_url_key, connection_parameter_dict))
monitor_setup_url_value = connection_parameter_dict[
self.monitor_setup_url_key]
monitor_url_match = re.match(r'.*url=(.*)', monitor_setup_url_value)
self.assertNotEqual(
None, monitor_url_match, '%s not parsable' % (monitor_setup_url_value,))
self.assertEqual(1, len(monitor_url_match.groups()))
monitor_url = monitor_url_match.groups()[0]
monitor_url_split = monitor_url.split('&')
self.assertEqual(
      3, len(monitor_url_split), '%s not splittable' % (monitor_url,))
self.monitor_url = monitor_url_split[0]
monitor_username = monitor_url_split[1].split('=')
self.assertEqual(
2, len(monitor_username), '%s not splittable' % (monitor_username))
monitor_password = monitor_url_split[2].split('=')
self.assertEqual(
2, len(monitor_password), '%s not splittable' % (monitor_password))
self.monitor_username = monitor_username[1]
self.monitor_password = monitor_password[1]
opml_text = requests.get(self.monitor_url, verify=False).text
opml = ET.fromstring(opml_text)
body = opml[1]
self.assertEqual('body', body.tag)
outline_list = body[0].findall('outline')
self.assertEqual(
self.monitor_configuration_list,
[q.attrib for q in outline_list]
)
expected_status_code_list = []
got_status_code_list = []
for monitor_configuration in self.monitor_configuration_list:
status_code = requests.get(
monitor_configuration['url'],
verify=False,
auth=(self.monitor_username, self.monitor_password)
).status_code
expected_status_code_list.append(
{
'url': monitor_configuration['url'],
'status_code': 200
}
)
got_status_code_list.append(
{
'url': monitor_configuration['url'],
'status_code': status_code
}
)
self.assertEqual(
expected_status_code_list,
got_status_code_list
)
def getSlavePartitionPath(self):
# partition w/ etc/trafficserver
return [
q for q in glob.glob(os.path.join(self.instance_path, '*',))
if os.path.exists(os.path.join(q, 'etc', 'trafficserver'))][0]
def test_trafficserver_logrotate(self):
ats_partition = [
q for q in glob.glob(os.path.join(self.instance_path, '*',))
if os.path.exists(os.path.join(q, 'bin', 'trafficserver-rotate'))][0]
ats_log_dir = os.path.join(ats_partition, 'var', 'log', 'trafficserver')
ats_logrotate_dir = os.path.join(
ats_partition, 'srv', 'backup', 'logrotate', 'trafficserver')
ats_rotate = os.path.join(ats_partition, 'bin', 'trafficserver-rotate')
old_file_name = 'log-old.old'
older_file_name = 'log-older.old'
with open(os.path.join(ats_log_dir, old_file_name), 'w') as fh:
fh.write('old')
with open(os.path.join(ats_log_dir, older_file_name), 'w') as fh:
fh.write('older')
# check rotation
result, output = subprocess_status_output([ats_rotate])
self.assertEqual(0, result)
self.assertEqual(
set(['log-old.old.xz', 'log-older.old.xz']),
set(os.listdir(ats_logrotate_dir)))
self.assertFalse(old_file_name + '.xz' in os.listdir(ats_log_dir))
self.assertFalse(older_file_name + '.xz' in os.listdir(ats_log_dir))
with lzma.open(
os.path.join(ats_logrotate_dir, old_file_name + '.xz')) as fh:
self.assertEqual(
'old',
fh.read()
)
with lzma.open(
os.path.join(ats_logrotate_dir, older_file_name + '.xz')) as fh:
self.assertEqual(
'older',
fh.read()
)
# check retention
old_time = time.time() - (400 * 24 * 3600)
os.utime(
os.path.join(ats_logrotate_dir, older_file_name + '.xz'),
(old_time, old_time))
result, output = subprocess_status_output([ats_rotate])
self.assertEqual(0, result)
self.assertEqual(
['log-old.old.xz'],
os.listdir(ats_logrotate_dir))
def test_master_partition_state(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertRejectedSlavePromiseEmptyWithPop(parameter_dict)
self.assertNodeInformationWithPop(parameter_dict)
expected_parameter_dict = {
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'example.com',
'accepted-slave-amount': '54',
'rejected-slave-amount': '0',
'slave-amount': '54',
'rejected-slave-dict': {
},
'warning-slave-dict': {
'_Url': [
"slave url ' %(backend)s/?a=b&c= ' has been converted to "
"'%(backend)s/?a=b&c='" % {'backend': self.backend_url}]}
}
self.assertEqual(
expected_parameter_dict,
parameter_dict
)
partition_path = self.getMasterPartitionPath()
    # check that monitor CORS domains are correctly set up by file
    # presence, as we trust the monitor stack to be tested in its proper
    # place and it is too hard to have a working monitor with the local
    # proxy
self.assertTestData(
open(
os.path.join(
partition_path, 'etc', 'httpd-cors.cfg'), 'r').read().strip())
def test_node_information_json(self):
node_information_file_path = glob.glob(os.path.join(
self.instance_path, '*', '.frontend-node-information.json'))[0]
with open(node_information_file_path, 'r') as fh:
current_node_information = json.load(fh)
modified_node_information = current_node_information.copy()
modified_node_information['version-hash-history'] = {'testhash': 'testurl'}
def writeNodeInformation(node_information, path):
with open(path, 'w') as fh:
json.dump(node_information, fh, sort_keys=True)
self.waitForInstance()
self.waitForInstance()
self.waitForInstance()
self.addCleanup(
writeNodeInformation, current_node_information,
node_information_file_path)
    # simulate that an upgrade happened
writeNodeInformation(
modified_node_information,
node_information_file_path)
parameter_dict = self.parseConnectionParameterDict()
expected_node_information = {
'node-id': current_node_information['node-id'],
'version-hash-history': current_node_information['version-hash-history']
}
expected_node_information['version-hash-history']['testhash'] = 'testurl'
self.assertEqual(
json.loads(parameter_dict['caddy-frontend-1-node-information-json']),
expected_node_information
)
def test_slave_partition_state(self):
partition_path = self.getSlavePartitionPath()
self.assertTrue(
'-grace 2s' in
open(os.path.join(partition_path, 'bin', 'caddy-wrapper'), 'r').read()
)
def test_monitor_conf(self):
monitor_conf_list = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'monitor.conf'
))
self.assertEqual(3, len(monitor_conf_list))
expected = [(False, q) for q in monitor_conf_list]
got = [('!py!' in open(q).read(), q) for q in monitor_conf_list]
    # check that no monitor.conf in the generated configuration contains
    # the magic !py!
self.assertEqual(
expected,
got
)
def test_empty(self):
parameter_dict = self.assertSlaveBase('empty')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.SERVICE_UNAVAILABLE, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertEqual(
'https://empty.example.com:%s/test-path' % (HTTP_PORT,),
result_http.headers['Location']
)
# check that 404 is as configured
result_missing = fakeHTTPSResult(
'forsuredoesnotexists.example.com', '')
self.assertEqual(httplib.NOT_FOUND, result_missing.status_code)
self.assertEqual(
"""
Instance not found
The instance has not been found
The reasons of this could be:
the instance does not exists or the URL is incorrect
in this case please check the URL
the instance has been stopped
in this case please check in the SlapOS Master if the instance is """
"""started or wait a bit for it to start
""",
result_missing.text
)
def test_server_polluted_keys_removed(self):
buildout_file = os.path.join(
self.getMasterPartitionPath(), 'instance-caddy-replicate.cfg')
for line in [
q for q in open(buildout_file).readlines()
if q.startswith('config-slave-list') or q.startswith(
'config-extra_slave_instance_list')]:
self.assertFalse('slave_title' in line)
self.assertFalse('slap_software_type' in line)
self.assertFalse('connection-parameter-hash' in line)
self.assertFalse('timestamp' in line)
def assertBackendHeaders(
self, backend_header_dict, domain, source_ip=SOURCE_IP, port=HTTPS_PORT,
proto='https', ignore_header_list=None, cached=False):
if ignore_header_list is None:
ignore_header_list = []
if 'Host' not in ignore_header_list:
self.assertEqual(
backend_header_dict['host'],
'%s:%s' % (domain, port))
self.assertEqual(
backend_header_dict['x-forwarded-for'],
source_ip
)
self.assertEqual(
backend_header_dict['x-forwarded-port'],
port
)
self.assertEqual(
backend_header_dict['x-forwarded-proto'],
proto
)
via_id = '%s-%s' % (
self.node_information_dict['node-id'],
self.node_information_dict['version-hash-history'].keys()[0])
if cached:
self.assertEqual(
[
'http/1.1 clientvia',
'HTTP/1.1 rapid-cdn-frontend-%(via_id)s, '
'http/1.1 rapid-cdn-cache-%(via_id)s' % dict(via_id=via_id),
'HTTP/1.1 rapid-cdn-backend-%(via_id)s' % dict(via_id=via_id)
],
backend_header_dict['via']
)
else:
self.assertEqual(
[
'http/1.1 clientvia',
'HTTP/1.1 rapid-cdn-frontend-%(via_id)s' % dict(via_id=via_id),
'HTTP/1.1 rapid-cdn-backend-%(via_id)s' % dict(via_id=via_id)
],
backend_header_dict['via']
)
def test_telemetry_disabled(self):
# here we trust that telemetry not present in error log means it was
# really disabled
error_log_file = glob.glob(
os.path.join(
self.instance_path, '*', 'var', 'log', 'frontend-error.log'))[0]
with open(error_log_file) as fh:
self.assertNotIn('Sending telemetry', fh.read(), 'Telemetry enabled')
def test_url(self):
parameter_dict = self.assertSlaveBase(
'Url',
{
'warning-list': [
"slave url ' %s/?a=b&c= ' has been converted to '%s/?a=b&c='" % (
self.backend_url, self.backend_url)],
}
)
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
'User-Agent': 'TEST USER AGENT',
}
)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
headers = self.assertResponseHeaders(result)
self.assertNotIn('Strict-Transport-Security', headers)
self.assertEqualResultJson(result, 'Path', '?a=b&c=/test-path/deeper')
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertEqual(j['Incoming Headers']['timeout'], '10')
self.assertFalse('Content-Encoding' in headers)
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'secured=value;secure, nonsecured=value',
headers['Set-Cookie']
)
self.assertLastLogLineRegexp(
'_Url_access_log',
r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} - - '
r'\[\d{2}\/.{3}\/\d{4}\:\d{2}\:\d{2}\:\d{2} \+\d{4}\] '
r'"GET \/test-path\/deep\/..\/.\/deeper HTTP\/1.1" \d{3} '
r'\d+ "-" "TEST USER AGENT" \d+'
)
self.assertLastLogLineRegexp(
'_Url_backend_log',
r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+ '
r'\[\d{2}\/.{3}\/\d{4}\:\d{2}\:\d{2}\:\d{2}.\d{3}\] '
r'http-backend _Url-http\/_Url-backend-http '
r'\d+/\d+\/\d+\/\d+\/\d+ '
r'200 \d+ - - ---- '
r'\d+\/\d+\/\d+\/\d+\/\d+ \d+\/\d+ '
r'"GET /test-path/deeper HTTP/1.1"'
)
result_http = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
headers = self.assertResponseHeaders(
result_http, via=False, backend_reached=False)
self.assertEqual(
'https://url.example.com:%s/test-path/deeper' % (HTTP_PORT,),
headers['Location']
)
# check that timeouts are correctly set in the haproxy configuration
backend_configuration_file = glob.glob(os.path.join(
self.instance_path, '*', 'etc', 'backend-haproxy.cfg'))[0]
with open(backend_configuration_file) as fh:
content = fh.read()
self.assertIn("""backend _Url-http
timeout server 12s
timeout connect 5s
retries 3""", content)
self.assertIn(""" timeout queue 60s
timeout server 12s
timeout client 12s
timeout connect 5s
retries 3""", content)
# check that no needless entries are generated
self.assertIn("backend _Url-http\n", content)
self.assertNotIn("backend _Url-https\n", content)
def test_url_netloc_list(self):
parameter_dict = self.assertSlaveBase('url-netloc-list')
result = fakeHTTPSResult(parameter_dict['domain'], 'path')
    # ensure that the request went to the backend specified in the
    # netloc list
self.assertEqual(
result.headers['X-Backend-Identification'],
'netloc'
)
def test_auth_to_backend(self):
parameter_dict = self.assertSlaveBase('auth-to-backend')
self.startAuthenticatedServerProcess()
try:
      # assert that nothing can be fetched without a key
try:
requests.get(self.backend_https_auth_url, verify=False)
except Exception:
pass
else:
self.fail(
'Access to %r shall be not possible without certificate' % (
self.backend_https_auth_url,))
      # check that this backend can be accessed via the frontend
      # (which means that authentication to the backend worked)
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
}
)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertEqual(j['Incoming Headers']['timeout'], '10')
self.assertFalse('Content-Encoding' in result.headers)
self.assertBackendHeaders(
j['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'secured=value;secure, nonsecured=value',
result.headers['Set-Cookie']
)
      # prove that the proper backend was accessed
self.assertEqual(
'Auth Backend',
result.headers['X-Backend-Identification']
)
finally:
self.stopAuthenticatedServerProcess()
def test_auth_to_backend_not_configured(self):
parameter_dict = self.assertSlaveBase('auth-to-backend-not-configured')
self.startAuthenticatedServerProcess()
try:
      # assert that nothing can be fetched without a key
try:
requests.get(self.backend_https_auth_url, verify=False)
except Exception:
pass
else:
self.fail(
'Access to %r shall be not possible without certificate' % (
self.backend_https_auth_url,))
      # check that this backend can be accessed via the frontend
      # (which means that authentication to the backend worked)
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
}
)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
result.status_code,
httplib.BAD_GATEWAY
)
finally:
self.stopAuthenticatedServerProcess()
def test_auth_to_backend_backend_ignore(self):
parameter_dict = self.assertSlaveBase('auth-to-backend-backend-ignore')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
}
)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertEqual(j['Incoming Headers']['timeout'], '10')
self.assertFalse('Content-Encoding' in result.headers)
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'secured=value;secure, nonsecured=value',
result.headers['Set-Cookie']
)
result_http = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertEqual(
'https://authtobackendbackendignore.example.com:%s/test-path/deeper' % (
HTTP_PORT,),
result_http.headers['Location']
)
def test_compressed_result(self):
parameter_dict = self.assertSlaveBase(
'Url',
{
'warning-list': [
"slave url ' %s/?a=b&c= ' has been converted to '%s/?a=b&c='" % (
self.backend_url, self.backend_url)],
}
)
result_compressed = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Accept-Encoding': 'gzip',
'Compress': '1',
}
)
self.assertEqual(
'gzip',
result_compressed.headers['Content-Encoding']
)
    # Assert that no tampering was done with the response
    # (compression/decompression).
    # The backend compresses with level 0, so any decompression and
    # recompression in the middle would change something
self.assertEqual(
result_compressed.headers['Content-Length'],
result_compressed.headers['Backend-Content-Length']
)
result_not_compressed = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Accept-Encoding': 'gzip',
}
)
self.assertFalse('Content-Encoding' in result_not_compressed.headers)
def test_no_content_type_alter(self):
parameter_dict = self.assertSlaveBase(
'Url',
{
'warning-list': [
"slave url ' %s/?a=b&c= ' has been converted to '%s/?a=b&c='" % (
self.backend_url, self.backend_url)],
}
)
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Accept-Encoding': 'gzip',
'X-Reply-Body': base64.b64encode(
b"""
Tove
Jani
Reminder
Don't forget me this weekend!
"""),
'X-Drop-Header': 'Content-Type'
}
)
self.assertEqual(
'text/xml; charset=utf-8',
result.headers['Content-Type']
)
@skip('Feature postponed')
def test_url_ipv6_access(self):
parameter_dict = self.parseSlaveParameterDict('url')
self.assertLogAccessUrlWithPop(parameter_dict)
self.assertEqual(
{
'domain': 'url.example.com',
'replication_number': '1',
'url': 'http://url.example.com',
'site_url': 'http://url.example.com',
'secure_access': 'https://url.example.com',
},
parameter_dict
)
result_ipv6 = fakeHTTPSResult(
parameter_dict['domain'], self._ipv6_address, 'test-path',
source_ip=self._ipv6_address)
self.assertEqual(
self._ipv6_address,
result_ipv6.json()['Incoming Headers']['x-forwarded-for']
)
self.assertEqual(
self.certificate_pem,
der2pem(result_ipv6.peercert))
self.assertEqualResultJson(result_ipv6, 'Path', '/test-path')
def test_type_zope_path(self):
parameter_dict = self.assertSlaveBase('type-zope-path')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/'
'https/typezopepath.example.com:443/path/to/some/resource'
'/VirtualHostRoot/'
'test-path/deeper'
)
def test_type_zope_default_path(self):
parameter_dict = self.assertSlaveBase('type-zope-default-path')
result = fakeHTTPSResult(
parameter_dict['domain'], '')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
httplib.MOVED_PERMANENTLY,
result.status_code
)
self.assertEqual(
'https://typezopedefaultpath.example.com:%s/'
'default-path/to/some/resource' % (
HTTPS_PORT,),
result.headers['Location']
)
def test_server_alias(self):
parameter_dict = self.assertSlaveBase('server-alias')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
'max-age=200', result.headers['Strict-Transport-Security'])
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result = fakeHTTPSResult(
'alias1.example.com',
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
'max-age=200', result.headers['Strict-Transport-Security'])
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result = fakeHTTPSResult(
'alias2.example.com',
'test-path/deep/.././deeper')
self.assertEqual(
'max-age=200', result.headers['Strict-Transport-Security'])
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
def test_server_alias_empty(self):
parameter_dict = self.assertSlaveBase('server-alias-empty')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
}
)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
'max-age=200; includeSubDomains',
result.headers['Strict-Transport-Security'])
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertEqual(j['Incoming Headers']['timeout'], '10')
self.assertFalse('Content-Encoding' in result.headers)
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'secured=value;secure, nonsecured=value',
result.headers['Set-Cookie']
)
def test_server_alias_wildcard(self):
parameter_dict = self.assertSlaveBase('server-alias-wildcard')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
'max-age=200; preload',
result.headers['Strict-Transport-Security'])
self.assertEqualResultJson(result, 'Path', '/test-path')
result = fakeHTTPSResult(
'wild.alias1.example.com', 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
'max-age=200; preload',
result.headers['Strict-Transport-Security'])
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_server_alias_duplicated(self):
parameter_dict = self.assertSlaveBase('server-alias-duplicated')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
result = fakeHTTPSResult(
'alias3.example.com', 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_server_alias_custom_domain_duplicated(self):
parameter_dict = self.assertSlaveBase(
'server-alias_custom_domain-duplicated', hostname='alias4')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
@skip('Feature postponed')
def test_check_error_log(self):
    # Caddy: need to implement something similar to check-error-on-apache-log
raise NotImplementedError(self.id())
def test_ssl_ca_crt(self):
parameter_dict = self.assertSlaveBase(
'custom_domain_ssl_crt_ssl_key_ssl_ca_crt')
    # now that the place to put the key is known, put the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = self.customdomain_ca_certificate_pem + \
self.customdomain_ca_key_pem + \
self.ca.certificate_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.customdomain_ca_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
certificate_file_list = glob.glob(os.path.join(
self.instance_path, '*', 'srv', 'autocert',
'_custom_domain_ssl_crt_ssl_key_ssl_ca_crt.pem'))
self.assertEqual(1, len(certificate_file_list))
certificate_file = certificate_file_list[0]
with open(certificate_file) as out:
self.assertEqual(data, out.read())
def test_ssl_ca_crt_only(self):
self.assertSlaveBase('ssl_ca_crt_only')
    # now that the place to put the key is known, put the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = self.ca.certificate_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.UNPROCESSABLE_ENTITY, upload.status_code)
self.assertEqual('Key incorrect', upload.text)
def test_ssl_ca_crt_garbage(self):
parameter_dict = self.assertSlaveBase('ssl_ca_crt_garbage')
    # now that the place to put the key is known, put the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
_, ca_key_pem, csr, _ = createCSR(
parameter_dict['domain'])
_, ca_certificate_pem = self.ca.signCSR(csr)
data = ca_certificate_pem + ca_key_pem + 'some garbage'
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
ca_certificate_pem,
der2pem(result.peercert)
)
self.assertEqualResultJson(result, 'Path', '/test-path')
certificate_file_list = glob.glob(os.path.join(
self.instance_path, '*', 'srv', 'autocert',
'_ssl_ca_crt_garbage.pem'))
self.assertEqual(1, len(certificate_file_list))
certificate_file = certificate_file_list[0]
with open(certificate_file) as out:
self.assertEqual(data, out.read())
def test_ssl_ca_crt_does_not_match(self):
parameter_dict = self.assertSlaveBase('ssl_ca_crt_does_not_match')
    # now that the place to put the key is known, put the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = self.certificate_pem + self.key_pem + self.ca.certificate_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
certificate_file_list = glob.glob(os.path.join(
self.instance_path, '*', 'srv', 'autocert',
'_ssl_ca_crt_does_not_match.pem'))
self.assertEqual(1, len(certificate_file_list))
certificate_file = certificate_file_list[0]
with open(certificate_file) as out:
self.assertEqual(data, out.read())
def test_https_only(self):
parameter_dict = self.assertSlaveBase('https-only')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result_http = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqualResultJson(result_http, 'Path', '/test-path/deeper')
def test_custom_domain(self):
parameter_dict = self.assertSlaveBase(
'custom_domain', hostname='mycustomdomain')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_custom_domain_server_alias(self):
parameter_dict = self.assertSlaveBase(
'custom_domain_server_alias', hostname='mycustomdomainserveralias')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
result = fakeHTTPSResult(
'mycustomdomainserveralias1.example.com',
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
def test_custom_domain_wildcard(self):
self.assertSlaveBase(
'custom_domain_wildcard', hostname='*.customdomain')
result = fakeHTTPSResult(
'wild.customdomain.example.com',
'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_custom_domain_ssl_crt_ssl_key(self):
parameter_dict = self.assertSlaveBase('custom_domain_ssl_crt_ssl_key')
    # now that the place to put the key is known, put the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = self.customdomain_certificate_pem + \
self.customdomain_key_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.customdomain_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_type_zope(self):
parameter_dict = self.assertSlaveBase('type-zope')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/https/typezope.example.com:443'
'/VirtualHostRoot/test-path/deeper'
)
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://typezope.example.com:%s/test-path/deep/.././deeper' % (
HTTP_PORT,),
result.headers['Location']
)
def test_type_zope_prefer_gzip_encoding_to_backend_https_only(self):
parameter_dict = self.assertSlaveBase(
'type-zope-prefer-gzip-encoding-to-backend-https-only')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/https/'
'typezopeprefergzipencodingtobackendhttpsonly.example.com:443'
'/VirtualHostRoot/test-path/deeper'
)
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/http/'
'typezopeprefergzipencodingtobackendhttpsonly.example.com:80'
'/VirtualHostRoot/test-path/deeper'
)
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/https/'
'typezopeprefergzipencodingtobackendhttpsonly.example.com:443'
'/VirtualHostRoot/test-path/deeper'
)
self.assertEqual(
'gzip', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/http/'
'typezopeprefergzipencodingtobackendhttpsonly.example.com:80'
'/VirtualHostRoot/test-path/deeper'
)
self.assertEqual(
'gzip', result.json()['Incoming Headers']['accept-encoding'])
def test_type_zope_prefer_gzip_encoding_to_backend(self):
parameter_dict = self.assertSlaveBase(
'type-zope-prefer-gzip-encoding-to-backend')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/https/'
'typezopeprefergzipencodingtobackend.example.com:443'
'/VirtualHostRoot/test-path/deeper'
)
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://%s:%s/test-path/deep/.././deeper' % (
parameter_dict['domain'], HTTP_PORT),
result.headers['Location']
)
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/https/'
'typezopeprefergzipencodingtobackend.example.com:443'
'/VirtualHostRoot/test-path/deeper'
)
self.assertEqual(
'gzip', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://%s:%s/test-path/deep/.././deeper' % (
parameter_dict['domain'], HTTP_PORT),
result.headers['Location']
)
def test_type_zope_virtualhostroot_http_port(self):
parameter_dict = self.assertSlaveBase(
'type-zope-virtualhostroot-http-port')
result = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/http/typezopevirtualhostroothttpport'
'.example.com:12345/VirtualHostRoot/test-path'
)
def test_type_zope_virtualhostroot_https_port(self):
parameter_dict = self.assertSlaveBase(
'type-zope-virtualhostroot-https-port')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(
result,
'Path',
'/VirtualHostBase/https/typezopevirtualhostroothttpsport'
'.example.com:12345/VirtualHostRoot/test-path'
)
def test_type_notebook(self):
parameter_dict = self.assertSlaveBase('type-notebook')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path',
HTTPS_PORT)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test/terminals/websocket/test',
HTTPS_PORT)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/terminals/websocket')
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_type_websocket(self):
parameter_dict = self.assertSlaveBase(
'type-websocket')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
headers={'Connection': 'Upgrade'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(
result,
'Path',
'/test-path'
)
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'Upgrade',
j['Incoming Headers']['connection']
)
self.assertTrue('x-real-ip' in j['Incoming Headers'])
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_type_websocket_websocket_transparent_false(self):
parameter_dict = self.assertSlaveBase(
'type-websocket-websocket-transparent-false')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
headers={'Connection': 'Upgrade'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(
result,
'Path',
'/test-path'
)
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
parsed = urlparse.urlparse(self.backend_url)
self.assertBackendHeaders(
j['Incoming Headers'], parsed.hostname, port='17', proto='irc',
ignore_header_list=['Host'])
self.assertEqual(
'Upgrade',
j['Incoming Headers']['connection']
)
self.assertFalse('x-real-ip' in j['Incoming Headers'])
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_type_websocket_websocket_path_list(self):
parameter_dict = self.assertSlaveBase(
'type-websocket-websocket-path-list')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
headers={'Connection': 'Upgrade'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(
result,
'Path',
'/test-path'
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertTrue('x-real-ip' in j['Incoming Headers'])
result = fakeHTTPSResult(
parameter_dict['domain'], 'ws/test-path',
headers={'Connection': 'Upgrade'})
self.assertEqualResultJson(
result,
'Path',
'/ws/test-path'
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'Upgrade',
j['Incoming Headers']['connection']
)
self.assertTrue('x-real-ip' in j['Incoming Headers'])
result = fakeHTTPSResult(
parameter_dict['domain'],
'with%20space/test-path', headers={'Connection': 'Upgrade'})
self.assertEqualResultJson(
result,
'Path',
'/with%20space/test-path'
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'Upgrade',
j['Incoming Headers']['connection']
)
self.assertTrue('x-real-ip' in j['Incoming Headers'])
def test_type_websocket_websocket_path_list_websocket_transparent_false(
self):
parameter_dict = self.assertSlaveBase(
'type-websocket-websocket-path-list-websocket-transparent-false')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
headers={'Connection': 'Upgrade'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(
result,
'Path',
'/test-path'
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
parsed = urlparse.urlparse(self.backend_url)
self.assertBackendHeaders(
j['Incoming Headers'], parsed.hostname, port='17', proto='irc',
ignore_header_list=['Host'])
self.assertFalse('x-real-ip' in j['Incoming Headers'])
result = fakeHTTPSResult(
parameter_dict['domain'], 'ws/test-path',
headers={'Connection': 'Upgrade'})
self.assertEqualResultJson(
result,
'Path',
'/ws/test-path'
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(
j['Incoming Headers'], parsed.hostname, port='17', proto='irc',
ignore_header_list=['Host'])
self.assertEqual(
'Upgrade',
j['Incoming Headers']['connection']
)
self.assertFalse('x-real-ip' in j['Incoming Headers'])
result = fakeHTTPSResult(
parameter_dict['domain'],
'with%20space/test-path', headers={'Connection': 'Upgrade'})
self.assertEqualResultJson(
result,
'Path',
'/with%20space/test-path'
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(
j['Incoming Headers'], parsed.hostname, port='17', proto='irc',
ignore_header_list=['Host'])
self.assertEqual(
'Upgrade',
j['Incoming Headers']['connection']
)
self.assertFalse('x-real-ip' in j['Incoming Headers'])
def test_type_redirect(self):
parameter_dict = self.assertSlaveBase('type-redirect')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'%stest-path/deeper' % (self.backend_url,),
result.headers['Location']
)
def test_type_redirect_custom_domain(self):
parameter_dict = self.assertSlaveBase(
'type-redirect-custom_domain', hostname='customdomaintyperedirect')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'%stest-path/deeper' % (self.backend_url,),
result.headers['Location']
)
def test_ssl_proxy_verify_ssl_proxy_ca_crt_unverified(self):
parameter_dict = self.assertSlaveBase(
'ssl-proxy-verify_ssl_proxy_ca_crt-unverified')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
httplib.SERVICE_UNAVAILABLE,
result.status_code
)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertEqual(
'https://sslproxyverifysslproxycacrtunverified.example.com:%s/'
'test-path' % (HTTP_PORT,),
result_http.headers['Location']
)
def test_ssl_proxy_verify_ssl_proxy_ca_crt(self):
parameter_dict = self.assertSlaveBase('ssl-proxy-verify_ssl_proxy_ca_crt')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertBackendHeaders(j['Incoming Headers'], parameter_dict['domain'])
self.assertFalse('Content-Encoding' in result.headers)
self.assertEqual(
'secured=value;secure, nonsecured=value',
result.headers['Set-Cookie']
)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertEqual(
'https://sslproxyverifysslproxycacrt.example.com:%s/test-path' % (
HTTP_PORT,),
result_http.headers['Location']
)
def test_ssl_proxy_verify_unverified(self):
parameter_dict = self.assertSlaveBase('ssl-proxy-verify-unverified')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
httplib.SERVICE_UNAVAILABLE,
result.status_code
)
def test_monitor_ipv6_test(self):
parameter_dict = self.assertSlaveBase('monitor-ipv6-test')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.SERVICE_UNAVAILABLE, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertEqual(
'https://monitoripv6test.example.com:%s/test-path' % (HTTP_PORT,),
result_http.headers['Location']
)
monitor_file = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'plugin',
'check-_monitor-ipv6-test-ipv6-packet-list-test.py'))[0]
    # load the promise module and check that its parameters are correct
self.assertEqual(
getPromisePluginParameterDict(monitor_file),
{
'frequency': '720',
'address': 'monitor-ipv6-test'
}
)
def test_monitor_ipv4_test(self):
parameter_dict = self.assertSlaveBase('monitor-ipv4-test')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.SERVICE_UNAVAILABLE, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertEqual(
'https://monitoripv4test.example.com:%s/test-path' % (HTTP_PORT,),
result_http.headers['Location']
)
monitor_file = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'plugin',
'check-_monitor-ipv4-test-ipv4-packet-list-test.py'))[0]
    # load the promise module and check that its parameters are correct
self.assertEqual(
getPromisePluginParameterDict(monitor_file),
{
'frequency': '720',
'ipv4': 'true',
'address': 'monitor-ipv4-test',
}
)
def test_ciphers(self):
parameter_dict = self.assertSlaveBase('ciphers')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.SERVICE_UNAVAILABLE, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertEqual(
'https://ciphers.example.com:%s/test-path' % (HTTP_PORT,),
result_http.headers['Location']
)
configuration_file = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'caddy-slave-conf.d', '_ciphers.conf'
))[0]
    with open(configuration_file) as fh:
      self.assertTrue(
        'ciphers RSA-3DES-EDE-CBC-SHA RSA-AES128-CBC-SHA'
        in fh.read()
      )
def test_enable_cache_custom_domain(self):
parameter_dict = self.assertSlaveBase(
'enable_cache_custom_domain',
hostname='customdomainenablecache')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
headers = self.assertResponseHeaders(result, True)
self.assertKeyWithPop('Age', headers)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
'Cache-Control': 'max-age=1, stale-while-revalidate=3600, '
'stale-if-error=3600'
},
headers
)
backend_headers = result.json()['Incoming Headers']
self.assertBackendHeaders(
backend_headers, parameter_dict['domain'], cached=True)
def test_enable_cache_server_alias(self):
parameter_dict = self.assertSlaveBase('enable_cache_server_alias')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
headers = self.assertResponseHeaders(result, cached=True)
self.assertKeyWithPop('Age', headers)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
'Cache-Control': 'max-age=1, stale-while-revalidate=3600, '
'stale-if-error=3600'
},
headers
)
backend_headers = result.json()['Incoming Headers']
self.assertBackendHeaders(
backend_headers, parameter_dict['domain'], cached=True)
result = fakeHTTPResult(
'enablecacheserveralias1.example.com',
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600'})
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://enablecacheserveralias1.example.com:%s/test-path/deeper' % (
HTTP_PORT,),
result.headers['Location']
)
def test_enable_cache_https_only_false(self):
parameter_dict = self.assertSlaveBase('enable_cache-https-only-false')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
headers = self.assertResponseHeaders(result, cached=True)
self.assertKeyWithPop('Age', headers)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
'Cache-Control': 'max-age=1, stale-while-revalidate=3600, '
'stale-if-error=3600'
},
headers
)
result = fakeHTTPResult(
parameter_dict['domain'],
'HTTPS/test', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600'})
self.assertEqual(httplib.OK, result.status_code)
self.assertEqualResultJson(result, 'Path', '/HTTPS/test')
self.assertResponseHeaders(result, cached=True)
def test_enable_cache(self):
parameter_dict = self.assertSlaveBase('enable_cache')
source_ip = '127.0.0.1'
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600',
},
source_ip=source_ip
)
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
headers = self.assertResponseHeaders(result, cached=True)
self.assertKeyWithPop('Age', headers)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
'Cache-Control': 'max-age=1, stale-while-revalidate=3600, '
'stale-if-error=3600'
},
headers
)
backend_headers = result.json()['Incoming Headers']
self.assertBackendHeaders(
backend_headers, parameter_dict['domain'], cached=True)
# BEGIN: Check that squid.log is correctly filled in
ats_log_file_list = glob.glob(
os.path.join(
self.instance_path, '*', 'var', 'log', 'trafficserver', 'squid.log'
))
self.assertEqual(1, len(ats_log_file_list))
ats_log_file = ats_log_file_list[0]
direct_pattern = re.compile(
r'.*TCP_MISS/200 .*test-path/deeper.*enablecache.example.com'
      '.* - DIRECT.*')
# ATS needs some time to flush logs
timeout = 10
b = time.time()
while True:
direct_pattern_match = 0
if (time.time() - b) > timeout:
break
with open(ats_log_file) as fh:
for line in fh.readlines():
if direct_pattern.match(line):
direct_pattern_match += 1
if direct_pattern_match > 0:
break
time.sleep(0.1)
with open(ats_log_file) as fh:
ats_log = fh.read()
self.assertRegexpMatches(ats_log, direct_pattern)
# END: Check that squid.log is correctly filled in
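  # The timed log-polling loop above (and a similar one in
  # test_enable_cache_ats_timeout) could be shared; a hedged sketch,
  # assuming a compiled regex and the module-level time import
  # (hypothetical helper, not part of the original suite):
  def _countLogMatches(self, log_file, pattern, timeout=10):
    # Poll the log until at least one line matches or the timeout elapses,
    # then return the final match count.
    begin = time.time()
    match_amount = 0
    while time.time() - begin <= timeout:
      with open(log_file) as fh:
        match_amount = len([q for q in fh.readlines() if pattern.match(q)])
      if match_amount > 0:
        break
      time.sleep(0.1)
    return match_amount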
def _hack_ats(self, max_stale_age):
records_config = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'trafficserver', 'records.config'
))
self.assertEqual(1, len(records_config))
self._hack_ats_records_config_path = records_config[0]
original_max_stale_age = \
'CONFIG proxy.config.http.cache.max_stale_age INT 604800\n'
new_max_stale_age = \
'CONFIG proxy.config.http.cache.max_stale_age INT %s\n' % (
max_stale_age,)
with open(self._hack_ats_records_config_path) as fh:
self._hack_ats_original_records_config = fh.readlines()
    # sanity check - are we really doing it?
self.assertIn(
original_max_stale_age,
self._hack_ats_original_records_config)
new_records_config = []
max_stale_age_changed = False
for line in self._hack_ats_original_records_config:
if line == original_max_stale_age:
line = new_max_stale_age
max_stale_age_changed = True
new_records_config.append(line)
self.assertTrue(max_stale_age_changed)
with open(self._hack_ats_records_config_path, 'w') as fh:
fh.write(''.join(new_records_config))
self._hack_ats_restart()
def _unhack_ats(self):
with open(self._hack_ats_records_config_path, 'w') as fh:
fh.write(''.join(self._hack_ats_original_records_config))
self._hack_ats_restart()
def _hack_ats_restart(self):
for process_info in self.callSupervisorMethod('getAllProcessInfo'):
if process_info['name'].startswith(
'trafficserver') and process_info['name'].endswith('-on-watch'):
self.callSupervisorMethod(
'stopProcess', '%(group)s:%(name)s' % process_info)
self.callSupervisorMethod(
'startProcess', '%(group)s:%(name)s' % process_info)
    # give ATS a short time to start back up
time.sleep(5)
for process_info in self.callSupervisorMethod('getAllProcessInfo'):
if process_info['name'].startswith(
'trafficserver') and process_info['name'].endswith('-on-watch'):
self.assertEqual(process_info['statename'], 'RUNNING')
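  # A hedged alternative to the fixed 5s sleep above: poll supervisord until
  # every matching trafficserver process reports RUNNING (sketch only,
  # hypothetical helper reusing the same process-name matching as above):
  def _waitForAtsRunning(self, timeout=30):
    begin = time.time()
    while time.time() - begin < timeout:
      statename_list = [
        process_info['statename']
        for process_info in self.callSupervisorMethod('getAllProcessInfo')
        if process_info['name'].startswith('trafficserver')
        and process_info['name'].endswith('-on-watch')]
      if statename_list and all(
           statename == 'RUNNING' for statename in statename_list):
        return
      time.sleep(0.5)
    self.fail('trafficserver did not reach RUNNING within %ss' % (timeout,))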
def test_enable_cache_negative_revalidate(self):
parameter_dict = self.assertSlaveBase('enable_cache')
source_ip = '127.0.0.1'
    # use a unique path for this test
path = self.id()
max_stale_age = 30
max_age = int(max_stale_age / 2.)
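    # e.g. with max_stale_age = 30 this gives max_age = int(30 / 2.) = 15:
    # entries are fresh for 15s and may be served stale for up to 30s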
# body_200 is big enough to trigger
# https://github.com/apache/trafficserver/issues/7880
body_200 = b'Body 200' * 500
body_502 = b'Body 502'
body_502_new = b'Body 502 new'
body_200_new = b'Body 200 new'
self.addCleanup(self._unhack_ats)
self._hack_ats(max_stale_age)
def configureResult(status_code, body):
backend_url = self.getSlaveParameterDictDict()['enable_cache']['url']
result = requests.put(backend_url + path, headers={
'X-Reply-Header-Cache-Control': 'max-age=%s, public' % (max_age,),
'X-Reply-Status-Code': status_code,
'X-Reply-Body': base64.b64encode(body),
        # drop the Content-Length header to reliably trigger
# https://github.com/apache/trafficserver/issues/7880
'X-Drop-Header': 'Content-Length',
})
self.assertEqual(result.status_code, httplib.CREATED)
def checkResult(status_code, body):
result = fakeHTTPSResult(
parameter_dict['domain'], path,
source_ip=source_ip
)
self.assertEqual(result.status_code, status_code)
self.assertEqual(result.text, body)
# backend returns something correctly
configureResult('200', body_200)
checkResult(httplib.OK, body_200)
configureResult('502', body_502)
time.sleep(1)
    # even if the backend returns 502, ATS serves the cached result
checkResult(httplib.OK, body_200)
    # interesting moment: the elapsed time is between max_age and
    # max_stale_age, which triggers
    # https://github.com/apache/trafficserver/issues/7880
time.sleep(max_age + 1)
checkResult(httplib.OK, body_200)
    # max_stale_age has passed, time to return 502 from the backend
time.sleep(max_stale_age + 2)
checkResult(httplib.BAD_GATEWAY, body_502)
configureResult('502', body_502_new)
time.sleep(1)
    # even if there is a new negative response on the backend, the old one
    # is still served from the cache
checkResult(httplib.BAD_GATEWAY, body_502)
time.sleep(max_age + 2)
    # now that the max-age of the negative response has passed, the new one
    # is served
checkResult(httplib.BAD_GATEWAY, body_502_new)
configureResult('200', body_200_new)
time.sleep(1)
checkResult(httplib.BAD_GATEWAY, body_502_new)
time.sleep(max_age + 2)
    # the backend is back to normal; as soon as the negative response's
    # max-age has passed, the new response is served
checkResult(httplib.OK, body_200_new)
@skip('Feature postponed')
def test_enable_cache_stale_if_error_respected(self):
parameter_dict = self.assertSlaveBase('enable_cache')
source_ip = '127.0.0.1'
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600',
},
source_ip=source_ip
)
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
headers = self.assertResponseHeaders(result)
self.assertKeyWithPop('Age', headers)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
'Cache-Control': 'max-age=1, stale-while-revalidate=3600, '
'stale-if-error=3600'
},
headers
)
backend_headers = result.json()['Incoming Headers']
self.assertBackendHeaders(
backend_headers, parameter_dict['domain'], cached=True)
    # check that stale-if-error support is really respected if not present
    # in the request
# wait a bit for max-age to expire
time.sleep(2)
    # real check: cache access does not provide old data with a stopped
    # backend
try:
      # stop the backend, so that connecting to it results in an error
self.stopServerProcess()
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1',
},
source_ip=source_ip
)
self.assertEqual(result.status_code, httplib.BAD_GATEWAY)
finally:
self.startServerProcess()
# END: check stale-if-error support
def test_enable_cache_ats_timeout(self):
parameter_dict = self.assertSlaveBase('enable_cache')
    # check that a timeout seen by ATS does not result in many queries to
    # the backend and that the next request works like a charm
result = fakeHTTPSResult(
parameter_dict['domain'],
'test_enable_cache_ats_timeout', headers={
'Timeout': '15',
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600'})
# ATS timed out
self.assertEqual(
httplib.GATEWAY_TIMEOUT,
result.status_code
)
backend_haproxy_log_file = glob.glob(
os.path.join(
self.instance_path, '*', 'var', 'log', 'backend-haproxy.log'
))[0]
matching_line_amount = 0
pattern = re.compile(
r'.* _enable_cache-http.backend .* 504 .*'
'"GET .test_enable_cache_ats_timeout HTTP.1.1"$')
with open(backend_haproxy_log_file) as fh:
for line in fh.readlines():
if pattern.match(line):
matching_line_amount += 1
    # the haproxy backend received at most one connection
self.assertIn(matching_line_amount, [0, 1])
timeout = 5
b = time.time()
    # ATS creates squid.log with a delay
while True:
if (time.time() - b) > timeout:
self.fail('Squid log file did not appear in %ss' % (timeout,))
ats_log_file_list = glob.glob(
os.path.join(
self.instance_path, '*', 'var', 'log', 'trafficserver', 'squid.log'
))
if len(ats_log_file_list) == 1:
ats_log_file = ats_log_file_list[0]
break
time.sleep(0.1)
pattern = re.compile(
r'.*ERR_READ_TIMEOUT/504 .*test_enable_cache_ats_timeout'
      '.*TIMEOUT_DIRECT.*')
timeout = 10
b = time.time()
# ATS needs some time to flush logs
while True:
matching_line_amount = 0
if (time.time() - b) > timeout:
break
with open(ats_log_file) as fh:
for line in fh.readlines():
if pattern.match(line):
matching_line_amount += 1
if matching_line_amount > 0:
break
time.sleep(0.1)
    # ATS has at most one entry for this query
self.assertIn(matching_line_amount, [0, 1])
# the result is available immediately after
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper', headers={
'X-Reply-Header-Cache-Control': 'max-age=1, stale-while-'
'revalidate=3600, stale-if-error=3600'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
def test_enable_cache_disable_no_cache_request(self):
parameter_dict = self.assertSlaveBase(
'enable_cache-disable-no-cache-request')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
headers={'Pragma': 'no-cache', 'Cache-Control': 'something'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
headers = self.assertResponseHeaders(result, cached=True)
self.assertKeyWithPop('Age', headers)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value'
},
headers
)
backend_headers = result.json()['Incoming Headers']
self.assertBackendHeaders(
backend_headers, parameter_dict['domain'], cached=True)
try:
j = result.json()
except Exception:
raise ValueError('JSON decode problem in:\n%s' % (result.text,))
self.assertFalse('pragma' in j['Incoming Headers'].keys())
def test_enable_cache_disable_via_header(self):
parameter_dict = self.assertSlaveBase('enable_cache-disable-via-header')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
headers = self.assertResponseHeaders(result, via=False)
self.assertKeyWithPop('Age', headers)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
},
headers
)
backend_headers = result.json()['Incoming Headers']
self.assertBackendHeaders(
backend_headers, parameter_dict['domain'], cached=True)
def test_enable_http2_false(self):
parameter_dict = self.assertSlaveBase('enable-http2-false')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
headers = self.assertResponseHeaders(result)
self.assertEqual(
{
'Content-Type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
},
headers
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_enable_http2_default(self):
parameter_dict = self.assertSlaveBase('enable-http2-default')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
headers = self.assertResponseHeaders(result)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
},
headers
)
self.assertTrue(
isHTTP2(parameter_dict['domain']))
def test_prefer_gzip_encoding_to_backend_https_only(self):
parameter_dict = self.assertSlaveBase(
'prefer-gzip-encoding-to-backend-https-only')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
self.assertBackendHeaders(
result.json()['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'gzip', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'deflate'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
self.assertBackendHeaders(
result.json()['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'deflate', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
self.assertBackendHeaders(
result.json()['Incoming Headers'], parameter_dict['domain'],
port=HTTP_PORT, proto='http')
self.assertEqual(
'gzip', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'deflate'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
self.assertBackendHeaders(
result.json()['Incoming Headers'], parameter_dict['domain'],
port=HTTP_PORT, proto='http')
self.assertEqual(
'deflate', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
def test_prefer_gzip_encoding_to_backend(self):
parameter_dict = self.assertSlaveBase(
'prefer-gzip-encoding-to-backend')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
self.assertBackendHeaders(
result.json()['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'gzip', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'deflate'})
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
self.assertBackendHeaders(
result.json()['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'deflate', result.json()['Incoming Headers']['accept-encoding'])
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'gzip, deflate'})
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://%s:%s/test-path/deeper' % (parameter_dict['domain'], HTTP_PORT),
result.headers['Location']
)
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={'Accept-Encoding': 'deflate'})
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://%s:%s/test-path/deeper' % (parameter_dict['domain'], HTTP_PORT),
result.headers['Location']
)
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://%s:%s/test-path/deeper' % (parameter_dict['domain'], HTTP_PORT),
result.headers['Location']
)
result = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
httplib.FOUND,
result.status_code
)
self.assertEqual(
'https://%s:%s/test-path/deeper' % (parameter_dict['domain'], HTTP_PORT),
result.headers['Location']
)
def test_disabled_cookie_list(self):
parameter_dict = self.assertSlaveBase('disabled-cookie-list')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
cookies=dict(
Chocolate='absent',
Vanilia='absent',
Coffee='present'
))
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
self.assertBackendHeaders(
result.json()['Incoming Headers'], parameter_dict['domain'])
self.assertEqual(
'Coffee=present', result.json()['Incoming Headers']['cookie'])
def test_https_url(self):
parameter_dict = self.assertSlaveBase('url_https-url')
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
'max-age=200; includeSubDomains; preload',
result.headers['Strict-Transport-Security'])
self.assertEqualResultJson(result, 'Path', '/https/test-path/deeper')
result_http = fakeHTTPResult(
parameter_dict['domain'],
'test-path/deep/.././deeper')
self.assertEqual(
httplib.FOUND,
result_http.status_code
)
self.assertNotIn('Strict-Transport-Security', result_http.headers)
self.assertEqual(
'https://urlhttpsurl.example.com:%s/test-path/deeper' % (HTTP_PORT,),
result_http.headers['Location']
)
# check that timeouts are correctly set in the haproxy configuration
backend_configuration_file = glob.glob(os.path.join(
self.instance_path, '*', 'etc', 'backend-haproxy.cfg'))[0]
with open(backend_configuration_file) as fh:
content = fh.read()
self.assertTrue("""backend _url_https-url-http
timeout server 15s
timeout connect 10s
retries 5""" in content)
def test_https_url_netloc_list(self):
parameter_dict = self.assertSlaveBase('https-url-netloc-list')
result = fakeHTTPSResult(parameter_dict['domain'], 'path')
    # ensure that the request went to the backend specified in the netloc
self.assertEqual(
result.headers['X-Backend-Identification'],
'netloc'
)
result = fakeHTTPResult(parameter_dict['domain'], 'path')
    # ensure that the request went to a backend NOT specified in the netloc
self.assertNotIn('X-Backend-Identification', result.headers)
class TestReplicateSlave(SlaveHttpFrontendTestCase, TestDataMixin):
instance_parameter_dict = {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def getInstanceParameterDict(cls):
return cls.instance_parameter_dict
@classmethod
def getSlaveParameterDictDict(cls):
return {
'replicate': {
'url': cls.backend_url,
'enable_cache': True,
},
}
def test(self):
    # now instantiate the 2nd partition in started state
    # and, due to port collision, stop the first one...
self.instance_parameter_dict.update({
'-frontend-quantity': 2,
'-sla-2-computer_guid': self.slap._computer_id,
'-frontend-1-state': 'stopped',
'-frontend-2-state': 'started',
})
self.requestDefaultInstance()
self.requestSlaves()
self.slap.waitForInstance(self.instance_max_retry)
    # ...and be nice, put the first one back online
self.instance_parameter_dict.update({
'-frontend-1-state': 'started',
'-frontend-2-state': 'stopped',
})
self.requestDefaultInstance()
self.slap.waitForInstance(self.instance_max_retry)
self.slap.waitForInstance(self.instance_max_retry)
self.slap.waitForInstance(self.instance_max_retry)
self.updateSlaveConnectionParameterDictDict()
# the real assertions follow...
parameter_dict = self.parseSlaveParameterDict('replicate')
self.assertLogAccessUrlWithPop(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict)
key_list = [
'caddy-frontend-1-node-information-json',
'caddy-frontend-2-node-information-json'
]
node_information_json_dict = {}
for k in parameter_dict.keys():
if k.startswith('caddy-frontend') and k.endswith(
'node-information-json'):
node_information_json_dict[k] = parameter_dict.pop(k)
    self.assertEqual(
      key_list,
      # sorted() avoids depending on dict key ordering
      sorted(node_information_json_dict.keys())
    )
node_information_dict = json.loads(node_information_json_dict[key_list[0]])
self.assertIn("node-id", node_information_dict)
self.assertIn("version-hash-history", node_information_dict)
self.node_information_dict = node_information_dict
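    # node_information_dict is expected to look roughly like (hypothetical
    # values): {"node-id": "...", "version-hash-history": {"<hash>": "..."}}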
self.assertEqual(
{
'domain': 'replicate.example.com',
'replication_number': '2',
'url': 'http://replicate.example.com',
'site_url': 'http://replicate.example.com',
'secure_access': 'https://replicate.example.com',
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
},
parameter_dict
)
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(httplib.FOUND, result_http.status_code)
    # prove the 2nd frontend exists by inspecting the instance
slave_configuration_name = '_replicate.conf'
slave_configuration_file_list = [
'/'.join([f[0], slave_configuration_name]) for f in [
q for q in os.walk(self.instance_path)
if slave_configuration_name in q[2]
]
]
self.assertEqual(
2, len(slave_configuration_file_list), slave_configuration_file_list)
class TestReplicateSlaveOtherDestroyed(SlaveHttpFrontendTestCase):
instance_parameter_dict = {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def getInstanceParameterDict(cls):
return cls.instance_parameter_dict
@classmethod
def getSlaveParameterDictDict(cls):
return {
'empty': {
'url': cls.backend_url,
'enable_cache': True,
}
}
def test_extra_slave_instance_list_not_present_destroyed_request(self):
    # now instantiate the 2nd partition in started state
    # and, due to port collision, stop the first one
self.instance_parameter_dict.update({
'-frontend-quantity': 2,
'-sla-2-computer_guid': self.slap._computer_id,
'-frontend-1-state': 'stopped',
'-frontend-2-state': 'started',
})
self.requestDefaultInstance()
self.slap.waitForInstance(self.instance_max_retry)
    # now start the first instance back, and destroy the 2nd one
self.instance_parameter_dict.update({
'-frontend-1-state': 'started',
'-frontend-2-state': 'destroyed',
})
self.requestDefaultInstance()
self.slap.waitForInstance(self.instance_max_retry)
self.slap.waitForInstance(self.instance_max_retry)
self.slap.waitForInstance(self.instance_max_retry)
buildout_file = os.path.join(
self.getMasterPartitionPath(), 'instance-caddy-replicate.cfg')
with open(buildout_file) as fh:
buildout_file_content = fh.read()
node_1_present = re.search(
"^config-frontend-name = !py!'caddy-frontend-1'$",
buildout_file_content, flags=re.M) is not None
node_2_present = re.search(
"^config-frontend-name = !py!'caddy-frontend-2'$",
buildout_file_content, flags=re.M) is not None
self.assertTrue(node_1_present)
self.assertFalse(node_2_present)
class TestEnableHttp2ByDefaultFalseSlave(SlaveHttpFrontendTestCase,
TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'enable-http2-by-default': 'false',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def getSlaveParameterDictDict(cls):
return {
'enable-http2-default': {
},
'enable-http2-false': {
'enable-http2': 'false',
},
'enable-http2-true': {
'enable-http2': 'true',
},
'dummy-cached': {
'url': cls.backend_url,
'enable_cache': True,
}
}
def test_enable_http2_default(self):
parameter_dict = self.assertSlaveBase('enable-http2-default')
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_enable_http2_false(self):
parameter_dict = self.assertSlaveBase('enable-http2-false')
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_enable_http2_true(self):
parameter_dict = self.assertSlaveBase('enable-http2-true')
self.assertTrue(
isHTTP2(parameter_dict['domain']))
class TestEnableHttp2ByDefaultDefaultSlave(SlaveHttpFrontendTestCase,
TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def getSlaveParameterDictDict(cls):
return {
'enable-http2-default': {
},
'enable-http2-false': {
'enable-http2': 'false',
},
'enable-http2-true': {
'enable-http2': 'true',
},
'dummy-cached': {
'url': cls.backend_url,
'enable_cache': True,
}
}
def test_enable_http2_default(self):
parameter_dict = self.assertSlaveBase('enable-http2-default')
self.assertTrue(
isHTTP2(parameter_dict['domain']))
def test_enable_http2_false(self):
parameter_dict = self.assertSlaveBase('enable-http2-false')
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_enable_http2_true(self):
parameter_dict = self.assertSlaveBase('enable-http2-true')
self.assertTrue(
isHTTP2(parameter_dict['domain']))
class TestRe6stVerificationUrlDefaultSlave(SlaveHttpFrontendTestCase,
TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def getSlaveParameterDictDict(cls):
return {
'default': {
'url': cls.backend_url,
'enable_cache': True
},
}
@classmethod
def waitForSlave(cls):
# no need to wait for slave availability here
return True
def test_default(self):
self.assertSlaveBase('default')
re6st_connectivity_promise_list = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'plugin',
're6st-connectivity.py'))
self.assertEqual(1, len(re6st_connectivity_promise_list))
re6st_connectivity_promise_file = re6st_connectivity_promise_list[0]
self.assertEqual(
getPromisePluginParameterDict(re6st_connectivity_promise_file),
{
'url': 'http://[2001:67c:1254:4::1]/index.html',
}
)
class TestRe6stVerificationUrlSlave(SlaveHttpFrontendTestCase,
TestDataMixin):
instance_parameter_dict = {
'port': HTTPS_PORT,
'domain': 'example.com',
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def getInstanceParameterDict(cls):
return cls.instance_parameter_dict
@classmethod
def getSlaveParameterDictDict(cls):
return {
'default': {
'url': cls.backend_url,
'enable_cache': True,
},
}
def test_default(self):
self.instance_parameter_dict[
're6st-verification-url'] = 'some-re6st-verification-url'
# re-request instance with updated parameters
self.requestDefaultInstance()
    # run the instance once; it's only needed for the later checks
try:
self.slap.waitForInstance()
except Exception:
pass
self.assertSlaveBase('default')
re6st_connectivity_promise_list = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'plugin',
're6st-connectivity.py'))
self.assertEqual(1, len(re6st_connectivity_promise_list))
re6st_connectivity_promise_file = re6st_connectivity_promise_list[0]
self.assertEqual(
getPromisePluginParameterDict(re6st_connectivity_promise_file),
{
'url': 'some-re6st-verification-url',
}
)
class TestSlaveGlobalDisableHttp2(TestSlave):
@classmethod
def getInstanceParameterDict(cls):
instance_parameter_dict = super(
TestSlaveGlobalDisableHttp2, cls).getInstanceParameterDict()
instance_parameter_dict['global-disable-http2'] = 'TrUe'
return instance_parameter_dict
def test_enable_http2_default(self):
parameter_dict = self.assertSlaveBase('enable-http2-default')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
headers = self.assertResponseHeaders(result)
self.assertEqual(
{
'Content-type': 'application/json',
'Set-Cookie': 'secured=value;secure, nonsecured=value',
},
headers
)
self.assertFalse(
isHTTP2(parameter_dict['domain']))
class TestEnableHttp2ByDefaultFalseSlaveGlobalDisableHttp2(
TestEnableHttp2ByDefaultFalseSlave):
@classmethod
def getInstanceParameterDict(cls):
instance_parameter_dict = super(
TestEnableHttp2ByDefaultFalseSlaveGlobalDisableHttp2,
cls).getInstanceParameterDict()
instance_parameter_dict['global-disable-http2'] = 'TrUe'
return instance_parameter_dict
def test_enable_http2_true(self):
parameter_dict = self.assertSlaveBase('enable-http2-true')
self.assertFalse(
isHTTP2(parameter_dict['domain']))
class TestEnableHttp2ByDefaultDefaultSlaveGlobalDisableHttp2(
TestEnableHttp2ByDefaultDefaultSlave):
@classmethod
def getInstanceParameterDict(cls):
instance_parameter_dict = super(
TestEnableHttp2ByDefaultDefaultSlaveGlobalDisableHttp2,
cls).getInstanceParameterDict()
instance_parameter_dict['global-disable-http2'] = 'TrUe'
return instance_parameter_dict
def test_enable_http2_true(self):
parameter_dict = self.assertSlaveBase('enable-http2-true')
self.assertFalse(
isHTTP2(parameter_dict['domain']))
def test_enable_http2_default(self):
parameter_dict = self.assertSlaveBase('enable-http2-default')
self.assertFalse(
isHTTP2(parameter_dict['domain']))
class TestSlaveSlapOSMasterCertificateCompatibilityOverrideMaster(
SlaveHttpFrontendTestCase, TestDataMixin):
@classmethod
def setUpMaster(cls):
# run partition until AIKC finishes
cls.runComputerPartitionUntil(
cls.untilNotReadyYetNotInMasterKeyGenerateAuthUrl)
parameter_dict = cls.requestDefaultInstance().getConnectionParameterDict()
cls._fetchKedifaCaucaseCaCertificateFile(parameter_dict)
# Do not upload certificates for the master partition
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'apache-certificate': cls.certificate_pem,
'apache-key': cls.key_pem,
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'mpm-graceful-shutdown-timeout': 2,
}
@classmethod
def getSlaveParameterDictDict(cls):
return {
'ssl_from_master_kedifa_overrides_master_certificate': {
'url': cls.backend_url,
'enable_cache': True
},
}
def test_ssl_from_master_kedifa_overrides_master_certificate(self):
parameter_dict = self.assertSlaveBase(
'ssl_from_master_kedifa_overrides_master_certificate')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
_, key_pem, _, certificate_pem = \
createSelfSignedCertificate([parameter_dict['domain']])
master_parameter_dict = \
self.requestDefaultInstance().getConnectionParameterDict()
auth = requests.get(
master_parameter_dict['master-key-generate-auth-url'],
verify=self.kedifa_caucase_ca_certificate_file)
requests.put(
master_parameter_dict['master-key-upload-url'] + auth.text,
data=key_pem + certificate_pem,
verify=self.kedifa_caucase_ca_certificate_file)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
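# Compatibility scenario for the obsolete ssl_crt/ssl_key/ssl_ca_crt slave
# parameters: they shall keep working, emit deprecation warnings, and remain
# overridable via the kedifa key-upload-url.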
class TestSlaveSlapOSMasterCertificateCompatibility(
SlaveHttpFrontendTestCase, TestDataMixin):
@classmethod
def setUpMaster(cls):
# run partition until AIKC finishes
cls.runComputerPartitionUntil(
cls.untilNotReadyYetNotInMasterKeyGenerateAuthUrl)
parameter_dict = cls.requestDefaultInstance().getConnectionParameterDict()
cls._fetchKedifaCaucaseCaCertificateFile(parameter_dict)
# Do not upload certificates for the master partition
@classmethod
def prepareCertificate(cls):
_, cls.ssl_from_slave_key_pem, _, cls.ssl_from_slave_certificate_pem = \
createSelfSignedCertificate(
[
'sslfromslave.example.com',
])
_, cls.ssl_from_slave_kedifa_overrides_key_pem, _, \
cls.ssl_from_slave_kedifa_overrides_certificate_pem = \
createSelfSignedCertificate(
[
'sslfromslavekedifaoverrides.example.com',
])
_, cls.type_notebook_ssl_from_slave_key_pem, _, \
cls.type_notebook_ssl_from_slave_certificate_pem = \
createSelfSignedCertificate(
[
'typenotebooksslfromslave.example.com',
])
_, cls.type_notebook_ssl_from_slave_kedifa_overrides_key_pem, _, \
cls.type_notebook_ssl_from_slave_kedifa_overrides_certificate_pem = \
createSelfSignedCertificate(
[
'typenotebooksslfromslavekedifaoverrides.example.com',
])
cls.ca = CertificateAuthority(
'TestSlaveSlapOSMasterCertificateCompatibility')
_, cls.customdomain_ca_key_pem, csr, _ = createCSR(
'customdomainsslcrtsslkeysslcacrt.example.com')
_, cls.customdomain_ca_certificate_pem = cls.ca.signCSR(csr)
_, cls.sslcacrtgarbage_ca_key_pem, csr, _ = createCSR(
'sslcacrtgarbage.example.com')
_, cls.sslcacrtgarbage_ca_certificate_pem = cls.ca.signCSR(csr)
_, cls.ssl_from_slave_ca_key_pem, csr, _ = createCSR(
'sslfromslave.example.com')
_, cls.ssl_from_slave_ca_certificate_pem = cls.ca.signCSR(csr)
_, cls.customdomain_key_pem, _, cls.customdomain_certificate_pem = \
createSelfSignedCertificate(['customdomainsslcrtsslkey.example.com'])
super(
TestSlaveSlapOSMasterCertificateCompatibility, cls).prepareCertificate()
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'apache-certificate': cls.certificate_pem,
'apache-key': cls.key_pem,
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'mpm-graceful-shutdown-timeout': 2,
}
@classmethod
def getSlaveParameterDictDict(cls):
return {
'ssl_from_master': {
'url': cls.backend_url,
'enable_cache': True,
},
'ssl_from_master_kedifa_overrides': {
'url': cls.backend_url,
},
'ssl_from_slave': {
'url': cls.backend_url,
'ssl_crt': cls.ssl_from_slave_certificate_pem,
'ssl_key': cls.ssl_from_slave_key_pem,
},
'ssl_from_slave_kedifa_overrides': {
'url': cls.backend_url,
'ssl_crt': cls.ssl_from_slave_kedifa_overrides_certificate_pem,
'ssl_key': cls.ssl_from_slave_kedifa_overrides_key_pem,
},
'custom_domain_ssl_crt_ssl_key': {
'url': cls.backend_url,
'ssl_crt': cls.customdomain_certificate_pem,
'ssl_key': cls.customdomain_key_pem,
'custom_domain': 'customdomainsslcrtsslkey.example.com'
},
'custom_domain_ssl_crt_ssl_key_ssl_ca_crt': {
'url': cls.backend_url,
'ssl_crt': cls.customdomain_ca_certificate_pem,
'ssl_key': cls.customdomain_ca_key_pem,
'ssl_ca_crt': cls.ca.certificate_pem,
'custom_domain': 'customdomainsslcrtsslkeysslcacrt.example.com',
},
'ssl_ca_crt_garbage': {
'url': cls.backend_url,
'ssl_crt': cls.sslcacrtgarbage_ca_certificate_pem,
'ssl_key': cls.sslcacrtgarbage_ca_key_pem,
'ssl_ca_crt': 'some garbage',
},
'ssl_ca_crt_does_not_match': {
'url': cls.backend_url,
'ssl_crt': cls.certificate_pem,
'ssl_key': cls.key_pem,
'ssl_ca_crt': cls.ca.certificate_pem,
},
'type-notebook-ssl_from_master': {
'url': cls.backend_url,
'type': 'notebook',
},
'type-notebook-ssl_from_slave': {
'url': cls.backend_url,
'ssl_crt': cls.type_notebook_ssl_from_slave_certificate_pem,
'ssl_key': cls.type_notebook_ssl_from_slave_key_pem,
'type': 'notebook',
},
'type-notebook-ssl_from_master_kedifa_overrides': {
'url': cls.backend_url,
'type': 'notebook',
},
'type-notebook-ssl_from_slave_kedifa_overrides': {
'url': cls.backend_url,
'ssl_crt':
cls.type_notebook_ssl_from_slave_kedifa_overrides_certificate_pem,
'ssl_key':
cls.type_notebook_ssl_from_slave_kedifa_overrides_key_pem,
'type': 'notebook',
}
}
def test_master_partition_state(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertNodeInformationWithPop(parameter_dict)
self.assertRejectedSlavePromiseEmptyWithPop(parameter_dict)
expected_parameter_dict = {
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'example.com',
'accepted-slave-amount': '12',
'rejected-slave-amount': '0',
'slave-amount': '12',
'rejected-slave-dict': {
},
'warning-list': [
u'apache-certificate is obsolete, please use master-key-upload-url',
u'apache-key is obsolete, please use master-key-upload-url',
],
'warning-slave-dict': {
u'_custom_domain_ssl_crt_ssl_key': [
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url'
],
u'_custom_domain_ssl_crt_ssl_key_ssl_ca_crt': [
u'ssl_ca_crt is obsolete, please use key-upload-url',
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url'
],
u'_ssl_ca_crt_does_not_match': [
u'ssl_ca_crt is obsolete, please use key-upload-url',
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url',
],
u'_ssl_ca_crt_garbage': [
u'ssl_ca_crt is obsolete, please use key-upload-url',
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url',
],
# u'_ssl_ca_crt_only': [
# u'ssl_ca_crt is obsolete, please use key-upload-url',
# ],
u'_ssl_from_slave': [
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url',
],
u'_ssl_from_slave_kedifa_overrides': [
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url',
],
# u'_ssl_key-ssl_crt-unsafe': [
# u'ssl_key is obsolete, please use key-upload-url',
# u'ssl_crt is obsolete, please use key-upload-url',
# ],
u'_type-notebook-ssl_from_slave': [
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url',
],
u'_type-notebook-ssl_from_slave_kedifa_overrides': [
u'ssl_crt is obsolete, please use key-upload-url',
u'ssl_key is obsolete, please use key-upload-url',
],
}
}
self.assertEqual(
expected_parameter_dict,
parameter_dict
)
def test_ssl_from_master(self):
parameter_dict = self.assertSlaveBase('ssl_from_master')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_ssl_from_master_kedifa_overrides(self):
parameter_dict = self.assertSlaveBase('ssl_from_master_kedifa_overrides')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
_, key_pem, _, certificate_pem = \
createSelfSignedCertificate([parameter_dict['domain']])
    # now that the key upload location is known, upload the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = certificate_pem + key_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_ssl_from_slave(self):
parameter_dict = self.assertSlaveBase(
'ssl_from_slave',
expected_parameter_dict={
'warning-list': [
'ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url',
]
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.ssl_from_slave_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_ssl_from_slave_kedifa_overrides(self):
parameter_dict = self.assertSlaveBase(
'ssl_from_slave_kedifa_overrides',
expected_parameter_dict={
'warning-list': ['ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url']
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.ssl_from_slave_kedifa_overrides_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
_, key_pem, _, certificate_pem = \
createSelfSignedCertificate([parameter_dict['domain']])
    # now that the key upload location is known, upload the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = certificate_pem + key_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_type_notebook_ssl_from_master(self):
parameter_dict = self.assertSlaveBase('type-notebook-ssl_from_master')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
HTTPS_PORT)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_type_notebook_ssl_from_master_kedifa_overrides(self):
parameter_dict = self.assertSlaveBase(
'type-notebook-ssl_from_master_kedifa_overrides')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
HTTPS_PORT)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
_, key_pem, _, certificate_pem = \
createSelfSignedCertificate([parameter_dict['domain']])
    # now that the key upload location is known, upload the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = certificate_pem + key_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
HTTPS_PORT)
self.assertEqual(
certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_type_notebook_ssl_from_slave(self):
parameter_dict = self.assertSlaveBase(
'type-notebook-ssl_from_slave',
expected_parameter_dict={
'warning-list': [
'ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url',
]
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
HTTPS_PORT)
self.assertEqual(
self.type_notebook_ssl_from_slave_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_type_notebook_ssl_from_slave_kedifa_overrides(self):
parameter_dict = self.assertSlaveBase(
'type-notebook-ssl_from_slave_kedifa_overrides',
expected_parameter_dict={
'warning-list': ['ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url']
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
HTTPS_PORT)
self.assertEqual(
self.type_notebook_ssl_from_slave_kedifa_overrides_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
_, key_pem, _, certificate_pem = \
createSelfSignedCertificate([parameter_dict['domain']])
    # now that the key upload location is known, upload the key there
auth = requests.get(
self.current_generate_auth,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, auth.status_code)
data = certificate_pem + key_pem
upload = requests.put(
self.current_upload_url + auth.text,
data=data,
verify=self.kedifa_caucase_ca_certificate_file)
self.assertEqual(httplib.CREATED, upload.status_code)
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path',
HTTPS_PORT)
self.assertEqual(
certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
@skip('Not implemented in new test system')
def test_custom_domain_ssl_crt_ssl_key(self):
parameter_dict = self.assertSlaveBase(
'custom_domain_ssl_crt_ssl_key',
expected_parameter_dict={
'warning-list': ['ssl_key is obsolete, please use key-upload-url',
'ssl_crt is obsolete, please use key-upload-url']
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.customdomain_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_ssl_ca_crt(self):
parameter_dict = self.assertSlaveBase(
'custom_domain_ssl_crt_ssl_key_ssl_ca_crt',
expected_parameter_dict={
'warning-list': [
'ssl_ca_crt is obsolete, please use key-upload-url',
'ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url'
]
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.customdomain_ca_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
certificate_file_list = glob.glob(os.path.join(
self.instance_path, '*', 'srv', 'bbb-ssl',
'_custom_domain_ssl_crt_ssl_key_ssl_ca_crt.crt'))
self.assertEqual(1, len(certificate_file_list))
certificate_file = certificate_file_list[0]
with open(certificate_file) as out:
expected = self.customdomain_ca_certificate_pem + '\n' + \
self.ca.certificate_pem + '\n' + self.customdomain_ca_key_pem
self.assertEqual(
expected,
out.read()
)
ca = CertificateAuthority(
'TestSlaveSlapOSMasterCertificateCompatibility')
_, customdomain_ca_key_pem, csr, _ = createCSR(
'customdomainsslcrtsslkeysslcacrt.example.com')
_, customdomain_ca_certificate_pem = ca.signCSR(csr)
slave_parameter_dict = self.getSlaveParameterDictDict()[
'custom_domain_ssl_crt_ssl_key_ssl_ca_crt'].copy()
slave_parameter_dict.update(
ssl_crt=customdomain_ca_certificate_pem,
ssl_key=customdomain_ca_key_pem,
ssl_ca_crt=ca.certificate_pem,
)
self.requestSlaveInstance(
partition_reference='custom_domain_ssl_crt_ssl_key_ssl_ca_crt',
partition_parameter_kw=slave_parameter_dict,
)
self.slap.waitForInstance()
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
customdomain_ca_certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
certificate_file_list = glob.glob(os.path.join(
self.instance_path, '*', 'srv', 'bbb-ssl',
'_custom_domain_ssl_crt_ssl_key_ssl_ca_crt.crt'))
self.assertEqual(1, len(certificate_file_list))
certificate_file = certificate_file_list[0]
with open(certificate_file) as out:
expected = customdomain_ca_certificate_pem + '\n' + ca.certificate_pem \
+ '\n' + customdomain_ca_key_pem
self.assertEqual(
expected,
out.read()
)
def test_ssl_ca_crt_garbage(self):
parameter_dict = self.assertSlaveBase(
'ssl_ca_crt_garbage',
expected_parameter_dict={
'warning-list': [
'ssl_ca_crt is obsolete, please use key-upload-url',
'ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url']
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.sslcacrtgarbage_ca_certificate_pem,
der2pem(result.peercert)
)
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_ssl_ca_crt_does_not_match(self):
parameter_dict = self.assertSlaveBase(
'ssl_ca_crt_does_not_match',
expected_parameter_dict={
'warning-list': [
'ssl_ca_crt is obsolete, please use key-upload-url',
'ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url'
]
})
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
certificate_file_list = glob.glob(os.path.join(
self.instance_path, '*', 'srv', 'bbb-ssl',
'_ssl_ca_crt_does_not_match.crt'))
self.assertEqual(1, len(certificate_file_list))
certificate_file = certificate_file_list[0]
with open(certificate_file) as out:
expected = self.certificate_pem + '\n' + self.ca.certificate_pem + \
'\n' + self.key_pem
self.assertEqual(
expected,
out.read()
)
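# Compatibility scenario: updating the obsolete apache-certificate and
# apache-key instance parameters shall be picked up by the running frontend.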
class TestSlaveSlapOSMasterCertificateCompatibilityUpdate(
SlaveHttpFrontendTestCase, TestDataMixin):
@classmethod
def setUpMaster(cls):
# run partition until AIKC finishes
cls.runComputerPartitionUntil(
cls.untilNotReadyYetNotInMasterKeyGenerateAuthUrl)
parameter_dict = cls.requestDefaultInstance().getConnectionParameterDict()
cls._fetchKedifaCaucaseCaCertificateFile(parameter_dict)
# Do not upload certificates for the master partition
instance_parameter_dict = {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'mpm-graceful-shutdown-timeout': 2,
}
@classmethod
def getInstanceParameterDict(cls):
if 'apache-certificate' not in cls.instance_parameter_dict:
cls.instance_parameter_dict.update(**{
'apache-certificate': cls.certificate_pem,
'apache-key': cls.key_pem,
})
return cls.instance_parameter_dict
@classmethod
def getSlaveParameterDictDict(cls):
return {
'ssl_from_master': {
'url': cls.backend_url,
'enable_cache': True,
},
}
def test_master_partition_state(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertNodeInformationWithPop(parameter_dict)
self.assertRejectedSlavePromiseEmptyWithPop(parameter_dict)
expected_parameter_dict = {
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'example.com',
'accepted-slave-amount': '1',
'rejected-slave-amount': '0',
'rejected-slave-dict': {},
'slave-amount': '1',
'warning-list': [
u'apache-certificate is obsolete, please use master-key-upload-url',
u'apache-key is obsolete, please use master-key-upload-url',
],
}
self.assertEqual(
expected_parameter_dict,
parameter_dict
)
def test_apache_key_apache_certificate_update(self):
parameter_dict = self.assertSlaveBase('ssl_from_master')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
_, key_pem, _, certificate_pem = createSelfSignedCertificate(
[
'*.customdomain.example.com',
'*.example.com',
'*.alias1.example.com',
])
self.instance_parameter_dict.update(**{
'apache-certificate': certificate_pem,
'apache-key': key_pem,
})
self.requestDefaultInstance()
self.slap.waitForInstance()
self.runKedifaUpdater()
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
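# Check cipher configuration: the master-level ciphers list applies by
# default and can be overridden per slave.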
class TestSlaveCiphers(SlaveHttpFrontendTestCase, TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'mpm-graceful-shutdown-timeout': 2,
'ciphers': 'ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA-AES256-GCM-SHA384'
}
@classmethod
def getSlaveParameterDictDict(cls):
return {
'default_ciphers': {
'url': cls.backend_url,
'enable_cache': True,
},
'own_ciphers': {
'ciphers': 'ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-RSA-AES128-GCM-SHA256',
'url': cls.backend_url,
'enable_cache': True,
},
}
def test_master_partition_state(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertNodeInformationWithPop(parameter_dict)
self.assertRejectedSlavePromiseEmptyWithPop(parameter_dict)
expected_parameter_dict = {
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'example.com',
'accepted-slave-amount': '2',
'rejected-slave-amount': '0',
'slave-amount': '2',
'rejected-slave-dict': {}
}
self.assertEqual(
expected_parameter_dict,
parameter_dict
)
def test_default_ciphers(self):
parameter_dict = self.assertSlaveBase('default_ciphers')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.OK, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(httplib.FOUND, result_http.status_code)
configuration_file = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'caddy-slave-conf.d',
'_default_ciphers.conf'
))[0]
self.assertTrue(
'ciphers ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA-AES256-GCM-SHA384'
in open(configuration_file).read()
)
def test_own_ciphers(self):
parameter_dict = self.assertSlaveBase('own_ciphers')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.OK, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(httplib.FOUND, result_http.status_code)
configuration_file = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'caddy-slave-conf.d',
'_own_ciphers.conf'
))[0]
self.assertTrue(
'ciphers ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-RSA-AES128-GCM-SHA256'
in open(configuration_file).read()
)
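# Check that slaves with invalid, unsafe or clashing parameters are rejected
# and reported both in the master connection parameters and in the
# rejected-slave promise.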
class TestSlaveRejectReportUnsafeDamaged(SlaveHttpFrontendTestCase):
@classmethod
def prepareCertificate(cls):
cls.ca = CertificateAuthority('TestSlaveRejectReportUnsafeDamaged')
super(TestSlaveRejectReportUnsafeDamaged, cls).prepareCertificate()
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def setUpClass(cls):
super(TestSlaveRejectReportUnsafeDamaged, cls).setUpClass()
cls.fillSlaveParameterDictDict()
cls.requestSlaves()
try:
cls.slap.waitForInstance(
max_retry=2 # two runs shall be enough
)
except Exception:
      # ignore exceptions; the problematic slaves are what is being tested
pass
cls.updateSlaveConnectionParameterDictDict()
slave_parameter_dict_dict = {}
@classmethod
def getSlaveParameterDictDict(cls):
return cls.slave_parameter_dict_dict
@classmethod
def fillSlaveParameterDictDict(cls):
cls.slave_parameter_dict_dict = {
'URL': {
'url': "https://[fd46::c2ae]:!py!u'123123'",
},
'HTTPS-URL': {
'https-url': "https://[fd46::c2ae]:!py!u'123123'",
},
'SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_DAMAGED': {
'url': cls.backend_https_url,
'ssl-proxy-verify': True,
'ssl_proxy_ca_crt': 'damaged',
},
'SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_EMPTY': {
'url': cls.backend_https_url,
'ssl-proxy-verify': True,
'ssl_proxy_ca_crt': '',
},
'health-check-failover-SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_DAMAGED': {
'url': cls.backend_https_url,
'health-check-failover-ssl-proxy-verify': True,
'health-check-failover-ssl-proxy-ca-crt': 'damaged',
},
'health-check-failover-SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_EMPTY': {
'url': cls.backend_https_url,
'health-check-failover-ssl-proxy-verify': True,
'health-check-failover-ssl-proxy-ca-crt': '',
},
'BAD-BACKEND': {
'url': 'http://1:2:3:4',
'https-url': 'http://host.domain:badport',
},
'EMPTY-BACKEND': {
'url': '',
'https-url': '',
},
'CUSTOM_DOMAIN-UNSAFE': {
'custom_domain': '${section:option} afterspace\nafternewline',
},
'SERVER-ALIAS-UNSAFE': {
'server-alias': '${section:option} afterspace',
},
'SERVER-ALIAS-SAME': {
'url': cls.backend_url,
'server-alias': 'serveraliassame.example.com',
},
'VIRTUALHOSTROOT-HTTP-PORT-UNSAFE': {
'type': 'zope',
'url': cls.backend_url,
'virtualhostroot-http-port': '${section:option}',
},
'VIRTUALHOSTROOT-HTTPS-PORT-UNSAFE': {
'type': 'zope',
'url': cls.backend_url,
'virtualhostroot-https-port': '${section:option}',
},
'DEFAULT-PATH-UNSAFE': {
'type': 'zope',
'url': cls.backend_url,
'default-path': '${section:option}\nn"\newline\n}\n}proxy\n/slashed',
},
'MONITOR-IPV4-TEST-UNSAFE': {
'monitor-ipv4-test': '${section:option}\nafternewline ipv4',
},
'MONITOR-IPV6-TEST-UNSAFE': {
'monitor-ipv6-test': '${section:option}\nafternewline ipv6',
},
'BAD-CIPHERS': {
'ciphers': 'bad ECDHE-ECDSA-AES256-GCM-SHA384 again',
},
'SITE_1': {
'custom_domain': 'duplicate.example.com',
},
'SITE_2': {
'custom_domain': 'duplicate.example.com',
},
'SITE_3': {
'server-alias': 'duplicate.example.com',
},
'SITE_4': {
'custom_domain': 'duplicate.example.com',
'server-alias': 'duplicate.example.com',
},
'SSL_CA_CRT_ONLY': {
'url': cls.backend_url,
'ssl_ca_crt': cls.ca.certificate_pem,
},
'SSL_KEY-SSL_CRT-UNSAFE': {
'ssl_key': '${section:option}ssl_keyunsafe\nunsafe',
'ssl_crt': '${section:option}ssl_crtunsafe\nunsafe',
},
'health-check-http-method': {
'health-check': True,
'health-check-http-method': 'WRONG',
},
'health-check-http-version': {
'health-check': True,
'health-check-http-version': 'WRONG/1.1',
},
'health-check-timeout': {
'health-check': True,
'health-check-timeout': 'WRONG',
},
'health-check-timeout-negative': {
'health-check': True,
'health-check-timeout': '-2',
},
'health-check-interval': {
'health-check': True,
'health-check-interval': 'WRONG',
},
'health-check-interval-negative': {
'health-check': True,
'health-check-interval': '-2',
},
'health-check-rise': {
'health-check': True,
'health-check-rise': 'WRONG',
},
'health-check-rise-negative': {
'health-check': True,
'health-check-rise': '-2',
},
'health-check-fall': {
'health-check': True,
'health-check-fall': 'WRONG',
},
'health-check-fall-negative': {
'health-check': True,
'health-check-fall': '-2',
}
}
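  # Helper: pop the rejected-slave promise URL from the parameters and check
  # that it reports exactly the expected clashing slaves.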
def assertRejectedSlavePromiseWithPop(self, parameter_dict):
rejected_slave_promise_url = parameter_dict.pop(
'rejected-slave-promise-url')
try:
result = requests.get(rejected_slave_promise_url, verify=False)
if result.text == '':
result_json = {}
else:
result_json = result.json()
self.assertEqual(
{
u'_SITE_4': [u"custom_domain 'duplicate.example.com' clashes"],
u'_SITE_2': [u"custom_domain 'duplicate.example.com' clashes"],
u'_SITE_3': [u"server-alias 'duplicate.example.com' clashes"]
},
result_json
)
except AssertionError:
raise
except Exception as e:
self.fail(e)
def test_master_partition_state(self):
parameter_dict = self.parseConnectionParameterDict()
self.assertKeyWithPop('monitor-setup-url', parameter_dict)
self.assertBackendHaproxyStatisticUrl(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertNodeInformationWithPop(parameter_dict)
self.assertRejectedSlavePromiseWithPop(parameter_dict)
expected_parameter_dict = {
'monitor-base-url': 'https://[%s]:8401' % self._ipv6_address,
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
'domain': 'example.com',
'accepted-slave-amount': '5',
'rejected-slave-amount': '28',
'slave-amount': '33',
'rejected-slave-dict': {
'_HTTPS-URL': ['slave https-url "https://[fd46::c2ae]:!py!u\'123123\'"'
' invalid'],
'_URL': [u'slave url "https://[fd46::c2ae]:!py!u\'123123\'" invalid'],
'_SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_DAMAGED': [
'ssl_proxy_ca_crt is invalid'
],
'_SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_EMPTY': [
'ssl_proxy_ca_crt is invalid'
],
'_BAD-CIPHERS': [
"Cipher 'again' is not supported.",
"Cipher 'bad' is not supported."
],
'_CUSTOM_DOMAIN-UNSAFE': [
"custom_domain '${section:option} afterspace\\nafternewline' invalid"
],
'_SERVER-ALIAS-UNSAFE': [
"server-alias '${section:option}' not valid",
"server-alias 'afterspace' not valid"
],
'_SITE_2': ["custom_domain 'duplicate.example.com' clashes"],
'_SITE_3': ["server-alias 'duplicate.example.com' clashes"],
'_SITE_4': ["custom_domain 'duplicate.example.com' clashes"],
'_SSL_CA_CRT_ONLY': [
"ssl_ca_crt is present, so ssl_crt and ssl_key are required"],
'_SSL_KEY-SSL_CRT-UNSAFE': [
"slave ssl_key and ssl_crt does not match"],
'_BAD-BACKEND': [
"slave https-url 'http://host.domain:badport' invalid",
"slave url 'http://1:2:3:4' invalid"],
'_VIRTUALHOSTROOT-HTTP-PORT-UNSAFE': [
"Wrong virtualhostroot-http-port '${section:option}'"],
'_VIRTUALHOSTROOT-HTTPS-PORT-UNSAFE': [
"Wrong virtualhostroot-https-port '${section:option}'"],
'_EMPTY-BACKEND': [
"slave https-url '' invalid",
"slave url '' invalid"],
'_health-check-failover-SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_DAMAGED': [
'health-check-failover-ssl-proxy-ca-crt is invalid'
],
'_health-check-failover-SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_EMPTY': [
'health-check-failover-ssl-proxy-ca-crt is invalid'
],
'_health-check-fall': [
'Wrong health-check-fall WRONG'],
'_health-check-fall-negative': [
'Wrong health-check-fall -2'],
'_health-check-http-method': [
'Wrong health-check-http-method WRONG'],
'_health-check-http-version': [
'Wrong health-check-http-version WRONG/1.1'],
'_health-check-interval': [
'Wrong health-check-interval WRONG'],
'_health-check-interval-negative': [
'Wrong health-check-interval -2'],
'_health-check-rise': [
'Wrong health-check-rise WRONG'],
'_health-check-rise-negative': [
'Wrong health-check-rise -2'],
'_health-check-timeout': [
'Wrong health-check-timeout WRONG'],
'_health-check-timeout-negative': [
'Wrong health-check-timeout -2'],
},
'warning-slave-dict': {
'_SSL_CA_CRT_ONLY': [
'ssl_ca_crt is obsolete, please use key-upload-url'],
'_SSL_KEY-SSL_CRT-UNSAFE': [
'ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url']}
}
self.assertEqual(
expected_parameter_dict,
parameter_dict
)
def test_url(self):
parameter_dict = self.parseSlaveParameterDict('URL')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"slave url \"https://[fd46::c2ae]:!py!u'123123'\" invalid"]
},
parameter_dict
)
def test_https_url(self):
parameter_dict = self.parseSlaveParameterDict('HTTPS-URL')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"slave https-url \"https://[fd46::c2ae]:!py!u'123123'\" invalid"]
},
parameter_dict
)
def test_ssl_proxy_verify_ssl_proxy_ca_crt_damaged(self):
parameter_dict = self.parseSlaveParameterDict(
'SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_DAMAGED')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{'request-error-list': ["ssl_proxy_ca_crt is invalid"]},
parameter_dict
)
def test_ssl_proxy_verify_ssl_proxy_ca_crt_empty(self):
parameter_dict = self.parseSlaveParameterDict(
'SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_EMPTY')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{'request-error-list': ["ssl_proxy_ca_crt is invalid"]},
parameter_dict
)
def test_health_check_failover_ssl_proxy_ca_crt_damaged(self):
parameter_dict = self.parseSlaveParameterDict(
'health-check-failover-SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_DAMAGED')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"health-check-failover-ssl-proxy-ca-crt is invalid"]
},
parameter_dict
)
def test_health_check_failover_ssl_proxy_ca_crt_empty(self):
parameter_dict = self.parseSlaveParameterDict(
'health-check-failover-SSL-PROXY-VERIFY_SSL_PROXY_CA_CRT_EMPTY')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"health-check-failover-ssl-proxy-ca-crt is invalid"]
},
parameter_dict
)
def test_server_alias_same(self):
parameter_dict = self.assertSlaveBase(
'SERVER-ALIAS-SAME')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_custom_domain_unsafe(self):
parameter_dict = self.parseSlaveParameterDict('CUSTOM_DOMAIN-UNSAFE')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"custom_domain '${section:option} afterspace\\nafternewline' invalid"
]
},
parameter_dict
)
def test_server_alias_unsafe(self):
parameter_dict = self.parseSlaveParameterDict('SERVER-ALIAS-UNSAFE')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"server-alias '${section:option}' not valid", "server-alias "
"'afterspace' not valid"]
},
parameter_dict
)
def test_bad_ciphers(self):
parameter_dict = self.parseSlaveParameterDict('BAD-CIPHERS')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"Cipher 'again' is not supported.",
"Cipher 'bad' is not supported."
]
},
parameter_dict
)
def test_virtualhostroot_http_port_unsafe(self):
parameter_dict = self.parseSlaveParameterDict(
'VIRTUALHOSTROOT-HTTP-PORT-UNSAFE')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"Wrong virtualhostroot-http-port '${section:option}'"
]
},
parameter_dict
)
def test_virtualhostroot_https_port_unsafe(self):
parameter_dict = self.parseSlaveParameterDict(
'VIRTUALHOSTROOT-HTTPS-PORT-UNSAFE')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"Wrong virtualhostroot-https-port '${section:option}'"
]
},
parameter_dict
)
  # Note: no "test_" prefix, so the test runner does not collect this method;
  # it seems intentionally disabled.
  def default_path_unsafe(self):
parameter_dict = self.parseSlaveParameterDict('DEFAULT-PATH-UNSAFE')
self.assertLogAccessUrlWithPop(parameter_dict)
self.assertKedifaKeysWithPop(parameter_dict, 'master-')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'domain': 'defaultpathunsafe.example.com',
'replication_number': '1',
'url': 'http://defaultpathunsafe.example.com',
'site_url': 'http://defaultpathunsafe.example.com',
'secure_access': 'https://defaultpathunsafe.example.com',
'backend-client-caucase-url': 'http://[%s]:8990' % self._ipv6_address,
},
parameter_dict
)
result = fakeHTTPSResult(
parameter_dict['domain'], '')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(
httplib.MOVED_PERMANENTLY,
result.status_code
)
self.assertEqual(
'https://defaultpathunsafe.example.com:%s/%%24%%7Bsection%%3Aoption%%7D'
'%%0An%%22%%0Aewline%%0A%%7D%%0A%%7Dproxy%%0A/slashed' % (HTTPS_PORT,),
result.headers['Location']
)
def test_monitor_ipv4_test_unsafe(self):
parameter_dict = self.assertSlaveBase('MONITOR-IPV4-TEST-UNSAFE')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.SERVICE_UNAVAILABLE, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(httplib.FOUND, result_http.status_code)
monitor_file = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'plugin',
'check-_MONITOR-IPV4-TEST-UNSAFE-ipv4-packet-list-test.py'))[0]
    # load the promise plugin and check that its parameters are correct
self.assertEqual(
getPromisePluginParameterDict(monitor_file),
{
'frequency': '720',
'ipv4': 'true',
'address': '${section:option}\nafternewline ipv4',
}
)
def test_monitor_ipv6_test_unsafe(self):
parameter_dict = self.assertSlaveBase('MONITOR-IPV6-TEST-UNSAFE')
result = fakeHTTPSResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqual(httplib.SERVICE_UNAVAILABLE, result.status_code)
result_http = fakeHTTPResult(
parameter_dict['domain'], 'test-path')
self.assertEqual(httplib.FOUND, result_http.status_code)
monitor_file = glob.glob(
os.path.join(
self.instance_path, '*', 'etc', 'plugin',
'check-_MONITOR-IPV6-TEST-UNSAFE-ipv6-packet-list-test.py'))[0]
    # load the promise plugin and check that its parameters are correct
self.assertEqual(
getPromisePluginParameterDict(monitor_file),
{
'frequency': '720',
'address': '${section:option}\nafternewline ipv6'
}
)
def test_site_1(self):
self.assertSlaveBase('SITE_1', hostname='duplicate')
def test_site_2(self):
parameter_dict = self.parseSlaveParameterDict('SITE_2')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': ["custom_domain 'duplicate.example.com' clashes"]
},
parameter_dict
)
def test_site_3(self):
parameter_dict = self.parseSlaveParameterDict('SITE_3')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': ["server-alias 'duplicate.example.com' clashes"]
},
parameter_dict,
)
def test_site_4(self):
parameter_dict = self.parseSlaveParameterDict('SITE_4')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': ["custom_domain 'duplicate.example.com' clashes"]
},
parameter_dict
)
def test_ssl_ca_crt_only(self):
parameter_dict = self.parseSlaveParameterDict('SSL_CA_CRT_ONLY')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
parameter_dict,
{
'request-error-list': [
"ssl_ca_crt is present, so ssl_crt and ssl_key are required"],
'warning-list': [
'ssl_ca_crt is obsolete, please use key-upload-url',
],
}
)
def test_ssl_key_ssl_crt_unsafe(self):
parameter_dict = self.parseSlaveParameterDict('SSL_KEY-SSL_CRT-UNSAFE')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': ["slave ssl_key and ssl_crt does not match"],
'warning-list': [
'ssl_crt is obsolete, please use key-upload-url',
'ssl_key is obsolete, please use key-upload-url']
},
parameter_dict
)
def test_bad_backend(self):
parameter_dict = self.parseSlaveParameterDict('BAD-BACKEND')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"slave https-url 'http://host.domain:badport' invalid",
"slave url 'http://1:2:3:4' invalid"],
},
parameter_dict
)
def test_empty_backend(self):
parameter_dict = self.parseSlaveParameterDict('EMPTY-BACKEND')
self.assertNodeInformationWithPop(parameter_dict)
self.assertEqual(
{
'request-error-list': [
"slave https-url '' invalid",
"slave url '' invalid"]
},
parameter_dict
)
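# Regression test: a wildcard custom domain and a more specific one shall not
# clash in the generated backend haproxy configuration, whatever the slave
# ordering.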
class TestSlaveHostHaproxyClash(SlaveHttpFrontendTestCase, TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'mpm-graceful-shutdown-timeout': 2,
'request-timeout': '12',
}
@classmethod
def getSlaveParameterDictDict(cls):
    # Note: The slaves are specifically constructed in an order which
    #       triggers the problem. The slave list is sorted in many places,
    #       so this configuration results in the backend haproxy seeing the
    #       slaves exactly as below; reordering them here would not help at
    #       all.
return {
'wildcard': {
'url': cls.backend_url + 'wildcard',
'custom_domain': '*.alias1.example.com',
},
'zspecific': {
'url': cls.backend_url + 'zspecific',
'custom_domain': 'zspecific.alias1.example.com',
},
}
def test(self):
self.assertSlaveBase(
'wildcard', hostname='*.alias1')
self.assertSlaveBase(
'zspecific', hostname='zspecific.alias1')
result_wildcard = fakeHTTPSResult(
'other.alias1.example.com',
'test-path',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
}
)
self.assertEqual(self.certificate_pem, der2pem(result_wildcard.peercert))
self.assertEqualResultJson(result_wildcard, 'Path', '/wildcard/test-path')
result_specific = fakeHTTPSResult(
'zspecific.alias1.example.com',
'test-path',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
}
)
self.assertEqual(self.certificate_pem, der2pem(result_specific.peercert))
self.assertEqualResultJson(result_specific, 'Path', '/zspecific/test-path')
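# Check which parameters the master partition passes down to the frontend and
# kedifa partitions, including per-frontend configuration overrides.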
class TestPassedRequestParameter(HttpFrontendTestCase):
# special SRs to check out
frontend_2_sr = 'special_sr_for_2'
frontend_3_sr = 'special_sr_for_3'
kedifa_sr = 'special_sr_for_kedifa'
@classmethod
def setUpClass(cls):
super(TestPassedRequestParameter, cls).setUpClass()
cls.slap.supply(cls.frontend_2_sr, cls.slap._computer_id)
cls.slap.supply(cls.frontend_3_sr, cls.slap._computer_id)
cls.slap.supply(cls.kedifa_sr, cls.slap._computer_id)
@classmethod
def tearDownClass(cls):
cls.slap.supply(
cls.frontend_2_sr, cls.slap._computer_id, state="destroyed")
cls.slap.supply(
cls.frontend_3_sr, cls.slap._computer_id, state="destroyed")
cls.slap.supply(
cls.kedifa_sr, cls.slap._computer_id, state="destroyed")
super(TestPassedRequestParameter, cls).tearDownClass()
instance_parameter_dict = {
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
}
@classmethod
def getInstanceParameterDict(cls):
return cls.instance_parameter_dict
def test(self):
self.instance_parameter_dict.update({
# master partition parameters
'-frontend-quantity': 3,
'-sla-2-computer_guid': self.slap._computer_id,
'-sla-3-computer_guid': self.slap._computer_id,
'-frontend-2-state': 'stopped',
'-frontend-2-software-release-url': self.frontend_2_sr,
'-frontend-3-state': 'stopped',
'-frontend-3-software-release-url': self.frontend_3_sr,
'-kedifa-software-release-url': self.kedifa_sr,
'automatic-internal-kedifa-caucase-csr': False,
'automatic-internal-backend-client-caucase-csr': False,
# all nodes partition parameters
'apache-certificate': self.certificate_pem,
'apache-key': self.key_pem,
'domain': 'example.com',
'enable-http2-by-default': True,
'global-disable-http2': True,
'mpm-graceful-shutdown-timeout': 2,
're6st-verification-url': 're6st-verification-url',
'backend-connect-timeout': 2,
'backend-connect-retries': 1,
'ciphers': 'ciphers',
'request-timeout': 100,
'authenticate-to-backend': True,
# specific parameters
'-frontend-config-1-ram-cache-size': '512K',
'-frontend-config-2-ram-cache-size': '256K',
})
# re-request instance with updated parameters
self.requestDefaultInstance()
    # run the instance once; it is only needed for the later checks
try:
self.slap.waitForInstance()
except Exception:
pass
computer = self.slap._slap.registerComputer('local')
# state of parameters of all instances
partition_parameter_dict_dict = {}
for partition in computer.getComputerPartitionList():
if partition.getState() == 'destroyed':
continue
parameter_dict = partition.getInstanceParameterDict()
instance_title = parameter_dict['instance_title']
if '_' in parameter_dict:
# "flatten" the instance parameter
parameter_dict = json.loads(parameter_dict['_'])
partition_parameter_dict_dict[instance_title] = parameter_dict
parameter_dict[
'X-software_release_url'] = partition.getSoftwareRelease().getURI()
base_software_url = self.getSoftwareURL()
# drop some very varying parameters
def assertKeyWithPop(d, k):
self.assertIn(k, d)
d.pop(k)
assertKeyWithPop(
partition_parameter_dict_dict['caddy-frontend-1'],
'master-key-download-url')
assertKeyWithPop(
partition_parameter_dict_dict['caddy-frontend-2'],
'master-key-download-url')
assertKeyWithPop(
partition_parameter_dict_dict['caddy-frontend-3'],
'master-key-download-url')
assertKeyWithPop(
partition_parameter_dict_dict['testing partition 0'],
'timestamp')
assertKeyWithPop(
partition_parameter_dict_dict['testing partition 0'],
'ip_list')
monitor_password = partition_parameter_dict_dict[
'caddy-frontend-1'].pop('monitor-password')
self.assertEqual(
monitor_password,
partition_parameter_dict_dict[
'caddy-frontend-2'].pop('monitor-password')
)
self.assertEqual(
monitor_password,
partition_parameter_dict_dict[
'caddy-frontend-3'].pop('monitor-password')
)
self.assertEqual(
monitor_password,
partition_parameter_dict_dict[
'kedifa'].pop('monitor-password')
)
backend_client_caucase_url = u'http://[%s]:8990' % (self._ipv6_address,)
kedifa_caucase_url = u'http://[%s]:15090' % (self._ipv6_address,)
expected_partition_parameter_dict_dict = {
'caddy-frontend-1': {
'X-software_release_url': base_software_url,
u'apache-certificate': unicode(self.certificate_pem),
u'apache-key': unicode(self.key_pem),
u'authenticate-to-backend': u'True',
u'backend-client-caucase-url': backend_client_caucase_url,
u'backend-connect-retries': u'1',
u'backend-connect-timeout': u'2',
u'ciphers': u'ciphers',
u'cluster-identification': u'testing partition 0',
u'domain': u'example.com',
u'enable-http2-by-default': u'True',
u'extra_slave_instance_list': u'[]',
u'frontend-name': u'caddy-frontend-1',
u'global-disable-http2': u'True',
u'kedifa-caucase-url': kedifa_caucase_url,
u'monitor-cors-domains': u'monitor.app.officejs.com',
u'monitor-httpd-port': 8411,
u'monitor-username': u'admin',
u'mpm-graceful-shutdown-timeout': u'2',
u'plain_http_port': '11080',
u'port': '11443',
u'ram-cache-size': u'512K',
u're6st-verification-url': u're6st-verification-url',
u'request-timeout': u'100',
u'slave-kedifa-information': u'{}'
},
'caddy-frontend-2': {
'X-software_release_url': self.frontend_2_sr,
u'apache-certificate': unicode(self.certificate_pem),
u'apache-key': unicode(self.key_pem),
u'authenticate-to-backend': u'True',
u'backend-client-caucase-url': backend_client_caucase_url,
u'backend-connect-retries': u'1',
u'backend-connect-timeout': u'2',
u'ciphers': u'ciphers',
u'cluster-identification': u'testing partition 0',
u'domain': u'example.com',
u'enable-http2-by-default': u'True',
u'extra_slave_instance_list': u'[]',
u'frontend-name': u'caddy-frontend-2',
u'global-disable-http2': u'True',
u'kedifa-caucase-url': kedifa_caucase_url,
u'monitor-cors-domains': u'monitor.app.officejs.com',
u'monitor-httpd-port': 8412,
u'monitor-username': u'admin',
u'mpm-graceful-shutdown-timeout': u'2',
u'plain_http_port': u'11080',
u'port': u'11443',
u'ram-cache-size': u'256K',
u're6st-verification-url': u're6st-verification-url',
u'request-timeout': u'100',
u'slave-kedifa-information': u'{}'
},
'caddy-frontend-3': {
'X-software_release_url': self.frontend_3_sr,
u'apache-certificate': unicode(self.certificate_pem),
u'apache-key': unicode(self.key_pem),
u'authenticate-to-backend': u'True',
u'backend-client-caucase-url': backend_client_caucase_url,
u'backend-connect-retries': u'1',
u'backend-connect-timeout': u'2',
u'ciphers': u'ciphers',
u'cluster-identification': u'testing partition 0',
u'domain': u'example.com',
u'enable-http2-by-default': u'True',
u'extra_slave_instance_list': u'[]',
u'frontend-name': u'caddy-frontend-3',
u'global-disable-http2': u'True',
u'kedifa-caucase-url': kedifa_caucase_url,
u'monitor-cors-domains': u'monitor.app.officejs.com',
u'monitor-httpd-port': 8413,
u'monitor-username': u'admin',
u'mpm-graceful-shutdown-timeout': u'2',
u'plain_http_port': u'11080',
u'port': u'11443',
u're6st-verification-url': u're6st-verification-url',
u'request-timeout': u'100',
u'slave-kedifa-information': u'{}'
},
'kedifa': {
'X-software_release_url': self.kedifa_sr,
u'caucase_port': u'15090',
u'cluster-identification': u'testing partition 0',
u'kedifa_port': u'15080',
u'monitor-cors-domains': u'monitor.app.officejs.com',
u'monitor-httpd-port': u'8402',
u'monitor-username': u'admin',
u'slave-list': []
},
'testing partition 0': {
'-frontend-2-software-release-url': self.frontend_2_sr,
'-frontend-2-state': 'stopped',
'-frontend-3-software-release-url': self.frontend_3_sr,
'-frontend-3-state': 'stopped',
'-frontend-config-1-ram-cache-size': '512K',
'-frontend-config-2-ram-cache-size': '256K',
'-frontend-quantity': '3',
'-kedifa-software-release-url': self.kedifa_sr,
'-sla-2-computer_guid': 'local',
'-sla-3-computer_guid': 'local',
'X-software_release_url': base_software_url,
'apache-certificate': unicode(self.certificate_pem),
'apache-key': unicode(self.key_pem),
'authenticate-to-backend': 'True',
'automatic-internal-backend-client-caucase-csr': 'False',
'automatic-internal-kedifa-caucase-csr': 'False',
'backend-connect-retries': '1',
'backend-connect-timeout': '2',
'caucase_port': '15090',
'ciphers': 'ciphers',
'domain': 'example.com',
'enable-http2-by-default': 'True',
'full_address_list': [],
'global-disable-http2': 'True',
'instance_title': 'testing partition 0',
'kedifa_port': '15080',
'mpm-graceful-shutdown-timeout': '2',
'plain_http_port': '11080',
'port': '11443',
're6st-verification-url': 're6st-verification-url',
'request-timeout': '100',
'root_instance_title': 'testing partition 0',
'slap_computer_id': 'local',
'slap_computer_partition_id': 'T-0',
'slap_software_release_url': base_software_url,
'slap_software_type': 'RootSoftwareInstance',
'slave_instance_list': []
}
}
self.assertEqual(
expected_partition_parameter_dict_dict,
partition_parameter_dict_dict
)
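# Check the health-check slave parameters: the generated haproxy backend
# configuration and the failover behaviour once the backend starts failing.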
class TestSlaveHealthCheck(SlaveHttpFrontendTestCase, TestDataMixin):
@classmethod
def getInstanceParameterDict(cls):
return {
'domain': 'example.com',
'port': HTTPS_PORT,
'plain_http_port': HTTP_PORT,
'kedifa_port': KEDIFA_PORT,
'caucase_port': CAUCASE_PORT,
'mpm-graceful-shutdown-timeout': 2,
'request-timeout': '12',
}
@classmethod
def getSlaveParameterDictDict(cls):
cls.setUpAssertionDict()
return {
'health-check-disabled': {
'url': cls.backend_url,
},
'health-check-default': {
'url': cls.backend_url,
'health-check': True,
},
'health-check-connect': {
'url': cls.backend_url,
'health-check': True,
'health-check-http-method': 'CONNECT',
},
'health-check-custom': {
'url': cls.backend_url,
'health-check': True,
'health-check-http-method': 'POST',
'health-check-http-path': '/POST-path to be encoded',
'health-check-http-version': 'HTTP/1.0',
'health-check-timeout': '7',
'health-check-interval': '15',
'health-check-rise': '3',
'health-check-fall': '7',
},
'health-check-failover-url': {
'https-only': False, # http and https access to check
'health-check-timeout': 1, # fail fast for test
'health-check-interval': 1, # fail fast for test
'url': cls.backend_url + 'url',
'https-url': cls.backend_url + 'https-url',
'health-check': True,
'health-check-http-path': '/health-check-failover-url',
'health-check-failover-url': cls.backend_url + 'failover-url?a=b&c=',
'health-check-failover-https-url':
cls.backend_url + 'failover-https-url?a=b&c=',
},
'health-check-failover-url-netloc-list': {
'https-only': False, # http and https access to check
'health-check-timeout': 1, # fail fast for test
'health-check-interval': 1, # fail fast for test
'url': cls.backend_url + 'url',
'https-url': cls.backend_url + 'https-url',
'health-check': True,
'health-check-http-path': '/health-check-failover-url',
'health-check-failover-url': cls.backend_url + 'failover-url?a=b&c=',
'health-check-failover-https-url':
cls.backend_url + 'failover-https-url?a=b&c=',
'health-check-failover-url-netloc-list':
'%(ip)s:%(port_a)s %(ip)s:%(port_b)s' % {
'ip': cls._ipv4_address,
'port_a': cls._server_netloc_a_http_port,
'port_b': cls._server_netloc_b_http_port},
},
'health-check-failover-url-auth-to-backend': {
'https-only': False, # http and https access to check
'health-check-timeout': 1, # fail fast for test
'health-check-interval': 1, # fail fast for test
'url': cls.backend_url + 'url',
'https-url': cls.backend_url + 'https-url',
'health-check': True,
'health-check-http-path': '/health-check-failover-url-auth-to-backend',
'health-check-authenticate-to-failover-backend': True,
'health-check-failover-url': 'https://%s:%s/failover-url?a=b&c=' % (
cls._ipv4_address, cls._server_https_auth_port),
'health-check-failover-https-url':
'https://%s:%s/failover-https-url?a=b&c=' % (
cls._ipv4_address, cls._server_https_auth_port),
},
'health-check-failover-url-ssl-proxy-verified': {
'url': cls.backend_url,
'health-check-timeout': 1, # fail fast for test
'health-check-interval': 1, # fail fast for test
'health-check': True,
'health-check-http-path': '/health-check-failover-url-ssl-proxy'
'-verified',
'health-check-failover-url': cls.backend_https_url,
'health-check-failover-ssl-proxy-verify': True,
'health-check-failover-ssl-proxy-ca-crt':
cls.test_server_ca.certificate_pem,
},
'health-check-failover-url-ssl-proxy-verify-unverified': {
'url': cls.backend_url,
'health-check-timeout': 1, # fail fast for test
'health-check-interval': 1, # fail fast for test
'health-check': True,
'health-check-http-path': '/health-check-failover-url-ssl-proxy-verify'
'-unverified',
'health-check-failover-url': cls.backend_https_url,
'health-check-failover-ssl-proxy-verify': True,
'health-check-failover-ssl-proxy-ca-crt':
cls.another_server_ca.certificate_pem,
},
'health-check-failover-url-ssl-proxy-verify-missing': {
'url': cls.backend_url,
'health-check-timeout': 1, # fail fast for test
'health-check-interval': 1, # fail fast for test
'health-check': True,
'health-check-http-path': '/health-check-failover-url-ssl-proxy-verify'
'-missing',
'health-check-failover-url': cls.backend_https_url,
'health-check-failover-ssl-proxy-verify': True,
},
}
@classmethod
def setUpAssertionDict(cls):
backend = urlparse.urlparse(cls.backend_url).netloc
cls.assertion_dict = {
'health-check-disabled': """\
backend _health-check-disabled-http
timeout server 12s
timeout connect 5s
retries 3
server _health-check-disabled-backend-http %s""" % (backend,),
'health-check-connect': """\
backend _health-check-connect-http
timeout server 12s
timeout connect 5s
retries 3
server _health-check-connect-backend-http %s check inter 5s"""
""" rise 1 fall 2
timeout check 2s""" % (backend,),
'health-check-custom': """\
backend _health-check-custom-http
timeout server 12s
timeout connect 5s
retries 3
server _health-check-custom-backend-http %s check inter 15s"""
""" rise 3 fall 7
option httpchk POST /POST-path%%20to%%20be%%20encoded HTTP/1.0
timeout check 7s""" % (backend,),
'health-check-default': """\
backend _health-check-default-http
timeout server 12s
timeout connect 5s
retries 3
server _health-check-default-backend-http %s check inter 5s"""
""" rise 1 fall 2
option httpchk GET / HTTP/1.1
timeout check 2s""" % (backend, )
}
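  # Helper: assert that the expected haproxy backend snippet is rendered and
  # that the slave still proxies requests correctly.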
def _test(self, key):
parameter_dict = self.assertSlaveBase(key)
self.assertIn(
self.assertion_dict[key],
self._get_backend_haproxy_configuration()
)
result = fakeHTTPSResult(
parameter_dict['domain'],
'test-path/deep/.././deeper',
headers={
'Timeout': '10', # more than default backend-connect-timeout == 5
'Accept-Encoding': 'gzip',
}
)
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path/deeper')
def test_health_check_disabled(self):
self._test('health-check-disabled')
def test_health_check_default(self):
self._test('health-check-default')
def test_health_check_connect(self):
self._test('health-check-connect')
def test_health_check_custom(self):
self._test('health-check-custom')
def test_health_check_failover_url(self):
parameter_dict = self.assertSlaveBase('health-check-failover-url')
slave_parameter_dict = self.getSlaveParameterDictDict()[
'health-check-failover-url']
# check normal access
result = fakeHTTPResult(parameter_dict['domain'], '/path')
self.assertEqualResultJson(result, 'Path', '/url/path')
result = fakeHTTPSResult(parameter_dict['domain'], '/path')
self.assertEqual(self.certificate_pem, der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/https-url/path')
# start replying with bad status code
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={'X-Reply-Status-Code': '502'})
self.assertEqual(result.status_code, httplib.CREATED)
def restoreBackend():
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={})
self.assertEqual(result.status_code, httplib.CREATED)
self.addCleanup(restoreBackend)
time.sleep(3) # > health-check-timeout + health-check-interval
result = fakeHTTPSResult(parameter_dict['domain'], '/failoverpath')
self.assertEqual(self.certificate_pem, der2pem(result.peercert))
self.assertEqualResultJson(
result, 'Path', '/failover-https-url?a=b&c=/failoverpath')
self.assertLastLogLineRegexp(
'_health-check-failover-url_backend_log',
r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+ '
r'\[\d{2}\/.{3}\/\d{4}\:\d{2}\:\d{2}\:\d{2}.\d{3}\] '
r'https-backend _health-check-failover-url-https-failover'
r'\/_health-check-failover-url-backend-https '
r'\d+/\d+\/\d+\/\d+\/\d+ '
r'200 \d+ - - ---- '
r'\d+\/\d+\/\d+\/\d+\/\d+ \d+\/\d+ '
r'"GET /failoverpath HTTP/1.1"'
)
result = fakeHTTPResult(parameter_dict['domain'], '/failoverpath')
self.assertEqualResultJson(
result, 'Path', '/failover-url?a=b&c=/failoverpath')
self.assertLastLogLineRegexp(
'_health-check-failover-url_backend_log',
r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+ '
r'\[\d{2}\/.{3}\/\d{4}\:\d{2}\:\d{2}\:\d{2}.\d{3}\] '
r'http-backend _health-check-failover-url-http-failover'
r'\/_health-check-failover-url-backend-http '
r'\d+/\d+\/\d+\/\d+\/\d+ '
r'200 \d+ - - ---- '
r'\d+\/\d+\/\d+\/\d+\/\d+ \d+\/\d+ '
r'"GET /failoverpath HTTP/1.1"'
)
def test_health_check_failover_url_netloc_list(self):
parameter_dict = self.assertSlaveBase(
'health-check-failover-url-netloc-list')
slave_parameter_dict = self.getSlaveParameterDictDict()[
'health-check-failover-url-netloc-list']
# check normal access
result = fakeHTTPSResult(parameter_dict['domain'], '/path')
self.assertNotIn('X-Backend-Identification', result.headers)
# start replying with bad status code
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={'X-Reply-Status-Code': '502'})
self.assertEqual(result.status_code, httplib.CREATED)
def restoreBackend():
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={})
self.assertEqual(result.status_code, httplib.CREATED)
self.addCleanup(restoreBackend)
time.sleep(3) # > health-check-timeout + health-check-interval
# check failover: the request is now served via the configured netloc
result = fakeHTTPSResult(parameter_dict['domain'], '/path')
self.assertEqual(
result.headers['X-Backend-Identification'],
'netloc'
)
def test_health_check_failover_url_auth_to_backend(self):
parameter_dict = self.assertSlaveBase(
'health-check-failover-url-auth-to-backend')
slave_parameter_dict = self.getSlaveParameterDictDict()[
'health-check-failover-url-auth-to-backend']
self.startAuthenticatedServerProcess()
self.addCleanup(self.stopAuthenticatedServerProcess)
# assert that nothing can be fetched without the client key and certificate
try:
requests.get(self.backend_https_auth_url, verify=False)
except Exception:
pass
else:
self.fail(
'Access to %r shall not be possible without a certificate' % (
self.backend_https_auth_url,))
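# (the try/except/else above is a manual assertRaises: the request is
# expected to raise because no client certificate is supplied, and falling
# through to the else branch means the authenticated backend accepted an
# anonymous client)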
# check normal access
result = fakeHTTPResult(parameter_dict['domain'], '/path')
self.assertEqualResultJson(result, 'Path', '/url/path')
self.assertNotIn('X-Backend-Identification', result.headers)
result = fakeHTTPSResult(parameter_dict['domain'], '/path')
self.assertEqual(self.certificate_pem, der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/https-url/path')
self.assertNotIn('X-Backend-Identification', result.headers)
# start replying with bad status code
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={'X-Reply-Status-Code': '502'})
self.assertEqual(result.status_code, httplib.CREATED)
time.sleep(3) # > health-check-timeout + health-check-interval
result = fakeHTTPSResult(parameter_dict['domain'], '/failoverpath')
self.assertEqual(self.certificate_pem, der2pem(result.peercert))
self.assertEqualResultJson(
result, 'Path', '/failover-https-url?a=b&c=/failoverpath')
self.assertEqual(
'Auth Backend', result.headers['X-Backend-Identification'])
result = fakeHTTPResult(parameter_dict['domain'], '/failoverpath')
self.assertEqualResultJson(
result, 'Path', '/failover-url?a=b&c=/failoverpath')
self.assertEqual(
'Auth Backend', result.headers['X-Backend-Identification'])
def test_health_check_failover_url_ssl_proxy_verified(self):
parameter_dict = self.assertSlaveBase(
'health-check-failover-url-ssl-proxy-verified')
slave_parameter_dict = self.getSlaveParameterDictDict()[
'health-check-failover-url-ssl-proxy-verified']
# check normal access
result = fakeHTTPSResult(parameter_dict['domain'], '/path')
self.assertEqual(self.certificate_pem, der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/path')
# start replying with bad status code
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={'X-Reply-Status-Code': '502'})
self.assertEqual(result.status_code, httplib.CREATED)
time.sleep(3) # > health-check-timeout + health-check-interval
result = fakeHTTPSResult(
parameter_dict['domain'], '/test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/test-path')
def test_health_check_failover_url_ssl_proxy_unverified(self):
parameter_dict = self.assertSlaveBase(
'health-check-failover-url-ssl-proxy-verify-unverified')
slave_parameter_dict = self.getSlaveParameterDictDict()[
'health-check-failover-url-ssl-proxy-verify-unverified']
# check normal access
result = fakeHTTPSResult(parameter_dict['domain'], '/path')
self.assertEqual(self.certificate_pem, der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/path')
# start replying with bad status code
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={'X-Reply-Status-Code': '502'})
self.assertEqual(result.status_code, httplib.CREATED)
time.sleep(3) # > health-check-timeout + health-check-interval
result = fakeHTTPSResult(
parameter_dict['domain'], '/test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
# as ssl proxy verification failed, the service is unavailable
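# (the failover target presents a certificate that cannot be validated
# against the configured CA, so the proxied connection is rejected and the
# client receives 503 Service Unavailable)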
self.assertEqual(result.status_code, httplib.SERVICE_UNAVAILABLE)
def test_health_check_failover_url_ssl_proxy_missing(self):
parameter_dict = self.assertSlaveBase(
'health-check-failover-url-ssl-proxy-verify-missing')
slave_parameter_dict = self.getSlaveParameterDictDict()[
'health-check-failover-url-ssl-proxy-verify-missing']
# check normal access
result = fakeHTTPSResult(parameter_dict['domain'], '/path')
self.assertEqual(self.certificate_pem, der2pem(result.peercert))
self.assertEqualResultJson(result, 'Path', '/path')
# start replying with bad status code
result = requests.put(
self.backend_url + slave_parameter_dict[
'health-check-http-path'].strip('/'),
headers={'X-Reply-Status-Code': '502'})
self.assertEqual(result.status_code, httplib.CREATED)
time.sleep(3) # > health-check-timeout + health-check-interval
result = fakeHTTPSResult(
parameter_dict['domain'], '/test-path')
self.assertEqual(
self.certificate_pem,
der2pem(result.peercert))
# as ssl proxy verification failed, the service is unavailable
self.assertEqual(result.status_code, httplib.SERVICE_UNAVAILABLE)
if __name__ == '__main__':
class HTTP6Server(ThreadedHTTPServer):
address_family = socket.AF_INET6
ip, port = sys.argv[1], int(sys.argv[2])
if ':' in ip:
klass = HTTP6Server
url_template = 'http://[%s]:%s/'
else:
klass = ThreadedHTTPServer
url_template = 'http://%s:%s/'
server = klass((ip, port), TestHandler)
print url_template % server.server_address[:2]
server.serve_forever()
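# standalone usage sketch (inferred from the block above): running this
# module directly as "python <module> <ip> <port>" serves TestHandler on the
# given address, switching to the IPv6 server class when <ip> contains ':',
# and prints the resulting base URL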
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data/ 0000775 0000000 0000000 00000000000 14241130220 0031407 5 ustar 00root root 0000000 0000000 test.TestEnableHttp2ByDefaultDefaultSlave.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0050134 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestEnableHttp2ByDefaultDefaultSlave.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001713 14241130220 0046614 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_dummy-cached_access_log
T-2/var/log/httpd/_dummy-cached_backend_log
T-2/var/log/httpd/_dummy-cached_error_log
T-2/var/log/httpd/_enable-http2-default_access_log
T-2/var/log/httpd/_enable-http2-default_error_log
T-2/var/log/httpd/_enable-http2-false_access_log
T-2/var/log/httpd/_enable-http2-false_error_log
T-2/var/log/httpd/_enable-http2-true_access_log
T-2/var/log/httpd/_enable-http2-true_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestEnableHttp2ByDefaultDefaultSlave.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0047340 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestEnableHttp2ByDefaultDefaultSlave.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0046637 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestEnableHttp2ByDefaultDefaultSlave.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0047424 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestEnableHttp2ByDefaultDefaultSlaveGlobalDisableHttp2.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0053463 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestEnableHttp2ByDefaultDefaultSlaveGlobalDisableHttp2.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001713 14241130220 0052143 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_dummy-cached_access_log
T-2/var/log/httpd/_dummy-cached_backend_log
T-2/var/log/httpd/_dummy-cached_error_log
T-2/var/log/httpd/_enable-http2-default_access_log
T-2/var/log/httpd/_enable-http2-default_error_log
T-2/var/log/httpd/_enable-http2-false_access_log
T-2/var/log/httpd/_enable-http2-false_error_log
T-2/var/log/httpd/_enable-http2-true_access_log
T-2/var/log/httpd/_enable-http2-true_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestEnableHttp2ByDefaultDefaultSlaveGlobalDisableHttp2.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0052667 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestEnableHttp2ByDefaultDefaultSlaveGlobalDisableHttp2.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0052166 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestEnableHttp2ByDefaultDefaultSlaveGlobalDisableHttp2.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0052753 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestEnableHttp2ByDefaultFalseSlave.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0047602 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestEnableHttp2ByDefaultFalseSlave.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001713 14241130220 0046262 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_dummy-cached_access_log
T-2/var/log/httpd/_dummy-cached_backend_log
T-2/var/log/httpd/_dummy-cached_error_log
T-2/var/log/httpd/_enable-http2-default_access_log
T-2/var/log/httpd/_enable-http2-default_error_log
T-2/var/log/httpd/_enable-http2-false_access_log
T-2/var/log/httpd/_enable-http2-false_error_log
T-2/var/log/httpd/_enable-http2-true_access_log
T-2/var/log/httpd/_enable-http2-true_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestEnableHttp2ByDefaultFalseSlave.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0047006 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestEnableHttp2ByDefaultFalseSlave.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0046305 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestEnableHttp2ByDefaultFalseSlave.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0047072 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestEnableHttp2ByDefaultFalseSlaveGlobalDisableHttp2.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0053131 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestEnableHttp2ByDefaultFalseSlaveGlobalDisableHttp2.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001713 14241130220 0051611 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_dummy-cached_access_log
T-2/var/log/httpd/_dummy-cached_backend_log
T-2/var/log/httpd/_dummy-cached_error_log
T-2/var/log/httpd/_enable-http2-default_access_log
T-2/var/log/httpd/_enable-http2-default_error_log
T-2/var/log/httpd/_enable-http2-false_access_log
T-2/var/log/httpd/_enable-http2-false_error_log
T-2/var/log/httpd/_enable-http2-true_access_log
T-2/var/log/httpd/_enable-http2-true_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestEnableHttp2ByDefaultFalseSlaveGlobalDisableHttp2.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0052335 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestEnableHttp2ByDefaultFalseSlaveGlobalDisableHttp2.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0051634 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestEnableHttp2ByDefaultFalseSlaveGlobalDisableHttp2.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0052421 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestMasterAIKCDisabledAIBCCDisabledRequest.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0050762 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestMasterAIKCDisabledAIBCCDisabledRequest.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001045 14241130220 0047440 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestMasterAIKCDisabledAIBCCDisabledRequest.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003530 14241130220 0050156 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestMasterAIKCDisabledAIBCCDisabledRequest.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0047465 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestMasterAIKCDisabledAIBCCDisabledRequest.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003313 14241130220 0050246 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestMasterRequest.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0044530 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestMasterRequest.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001045 14241130220 0043206 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestMasterRequest.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0043734 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestMasterRequest.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0043233 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestMasterRequest.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0044020 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestMasterRequestDomain.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0045660 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestMasterRequestDomain.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001045 14241130220 0044336 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestMasterRequestDomain.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0045064 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestMasterRequestDomain.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0044363 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestMasterRequestDomain.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0045150 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestRe6stVerificationUrlDefaultSlave.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0050255 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestRe6stVerificationUrlDefaultSlave.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001160 14241130220 0046731 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_default_access_log
T-2/var/log/httpd/_default_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestRe6stVerificationUrlDefaultSlave.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0047461 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestRe6stVerificationUrlDefaultSlave.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0046760 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestRe6stVerificationUrlDefaultSlave.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0047545 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestRe6stVerificationUrlSlave.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0046750 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestRe6stVerificationUrlSlave.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001227 14241130220 0045430 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_default_access_log
T-2/var/log/httpd/_default_backend_log
T-2/var/log/httpd/_default_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestRe6stVerificationUrlSlave.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0046154 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestRe6stVerificationUrlSlave.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0045453 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestRe6stVerificationUrlSlave.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0046240 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestReplicateSlave.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000001112 14241130220 0044616 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
T-3/etc/cron.d/logrotate
T-3/etc/cron.d/monitor-configurator
T-3/etc/cron.d/monitor-globalstate
T-3/etc/cron.d/monitor_collect
T-3/etc/cron.d/trafficserver-logrotate
test.TestReplicateSlave.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000002052 14241130220 0043304 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_replicate_access_log
T-2/var/log/httpd/_replicate_backend_log
T-2/var/log/httpd/_replicate_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
T-3/var/log/backend-haproxy.log
T-3/var/log/expose-csr.log
T-3/var/log/frontend-access.log
T-3/var/log/frontend-error.log
T-3/var/log/httpd/_replicate_access_log
T-3/var/log/httpd/_replicate_error_log
T-3/var/log/monitor-httpd-access.log
T-3/var/log/monitor-httpd-error.log
T-3/var/log/slave-introspection-access.log
T-3/var/log/slave-introspection-error.log
T-3/var/log/trafficserver/manager.log
test.TestReplicateSlave.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000006132 14241130220 0044024 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-2.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
T-3/etc/plugin/__init__.py
T-3/etc/plugin/backend-client-caucase-updater.py
T-3/etc/plugin/backend-haproxy-configuration.py
T-3/etc/plugin/backend-haproxy-statistic-frontend.py
T-3/etc/plugin/backend_haproxy_http.py
T-3/etc/plugin/backend_haproxy_https.py
T-3/etc/plugin/buildout-T-3-status.py
T-3/etc/plugin/caddy_frontend_ipv4_http.py
T-3/etc/plugin/caddy_frontend_ipv4_https.py
T-3/etc/plugin/caddy_frontend_ipv6_http.py
T-3/etc/plugin/caddy_frontend_ipv6_https.py
T-3/etc/plugin/caucase-updater.py
T-3/etc/plugin/check-free-disk-space.py
T-3/etc/plugin/expose-csr-ip-port-listening.py
T-3/etc/plugin/frontend-caddy-configuration-promise.py
T-3/etc/plugin/monitor-bootstrap-status.py
T-3/etc/plugin/monitor-http-frontend.py
T-3/etc/plugin/monitor-httpd-listening-on-tcp.py
T-3/etc/plugin/promise-logrotate-setup.py
T-3/etc/plugin/re6st-connectivity.py
T-3/etc/plugin/slave-introspection-configuration.py
T-3/etc/plugin/slave_introspection_https.py
T-3/etc/plugin/trafficserver-cache-availability.py
T-3/etc/plugin/trafficserver-port-listening.py
test.TestReplicateSlave.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001600 14241130220 0043325 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
T-3/var/run/backend-haproxy.pid
T-3/var/run/backend_haproxy_configuration_last_state
T-3/var/run/backend_haproxy_graceful_configuration_state_signature
T-3/var/run/graceful_configuration_state_signature
T-3/var/run/slave_introspection_configuration_last_state
T-3/var/run/slave_introspection_graceful_configuration_state_signature
test.TestReplicateSlave.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000005376 14241130220 0044126 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
T-3:6tunnel-11080-{hash-generic}-on-watch STOPPED
T-3:6tunnel-11443-{hash-generic}-on-watch STOPPED
T-3:backend-client-login-certificate-caucase-updater-on-watch STOPPED
T-3:backend-haproxy-{hash-generic}-on-watch STOPPED
T-3:backend-haproxy-rsyslogd-{hash-generic}-on-watch STOPPED
T-3:backend-haproxy-safe-graceful EXITED
T-3:bootstrap-monitor EXITED
T-3:certificate_authority-{hash-generic}-on-watch STOPPED
T-3:crond-{hash-generic}-on-watch STOPPED
T-3:expose-csr-{hash-generic}-on-watch STOPPED
T-3:frontend-caddy-safe-graceful EXITED
T-3:frontend_caddy-{hash-caddy-T-3}-on-watch STOPPED
T-3:kedifa-login-certificate-caucase-updater-on-watch STOPPED
T-3:kedifa-updater-{hash-generic}-on-watch STOPPED
T-3:monitor-httpd-{hash-generic}-on-watch STOPPED
T-3:monitor-httpd-graceful EXITED
T-3:slave-instrospection-nginx-{hash-generic}-on-watch STOPPED
T-3:slave-introspection-safe-graceful EXITED
T-3:trafficserver-{hash-generic}-on-watch STOPPED
T-3:trafficserver-reload EXITED
test.TestSlave.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0042776 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlave.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000021757 14241130220 0041470 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_Url_access_log
T-2/var/log/httpd/_Url_backend_log
T-2/var/log/httpd/_Url_error_log
T-2/var/log/httpd/_auth-to-backend-backend-ignore_access_log
T-2/var/log/httpd/_auth-to-backend-backend-ignore_backend_log
T-2/var/log/httpd/_auth-to-backend-backend-ignore_error_log
T-2/var/log/httpd/_auth-to-backend-not-configured_access_log
T-2/var/log/httpd/_auth-to-backend-not-configured_backend_log
T-2/var/log/httpd/_auth-to-backend-not-configured_error_log
T-2/var/log/httpd/_auth-to-backend_access_log
T-2/var/log/httpd/_auth-to-backend_backend_log
T-2/var/log/httpd/_auth-to-backend_error_log
T-2/var/log/httpd/_bad-backend_access_log
T-2/var/log/httpd/_bad-backend_backend_log
T-2/var/log/httpd/_bad-backend_error_log
T-2/var/log/httpd/_ciphers_access_log
T-2/var/log/httpd/_ciphers_error_log
T-2/var/log/httpd/_custom_domain_access_log
T-2/var/log/httpd/_custom_domain_backend_log
T-2/var/log/httpd/_custom_domain_error_log
T-2/var/log/httpd/_custom_domain_server_alias_access_log
T-2/var/log/httpd/_custom_domain_server_alias_backend_log
T-2/var/log/httpd/_custom_domain_server_alias_error_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_access_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_backend_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_error_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_access_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_backend_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_error_log
T-2/var/log/httpd/_custom_domain_wildcard_access_log
T-2/var/log/httpd/_custom_domain_wildcard_backend_log
T-2/var/log/httpd/_custom_domain_wildcard_error_log
T-2/var/log/httpd/_disabled-cookie-list_access_log
T-2/var/log/httpd/_disabled-cookie-list_backend_log
T-2/var/log/httpd/_disabled-cookie-list_error_log
T-2/var/log/httpd/_empty_access_log
T-2/var/log/httpd/_empty_error_log
T-2/var/log/httpd/_enable-http2-default_access_log
T-2/var/log/httpd/_enable-http2-default_backend_log
T-2/var/log/httpd/_enable-http2-default_error_log
T-2/var/log/httpd/_enable-http2-false_access_log
T-2/var/log/httpd/_enable-http2-false_backend_log
T-2/var/log/httpd/_enable-http2-false_error_log
T-2/var/log/httpd/_enable_cache-disable-no-cache-request_access_log
T-2/var/log/httpd/_enable_cache-disable-no-cache-request_backend_log
T-2/var/log/httpd/_enable_cache-disable-no-cache-request_error_log
T-2/var/log/httpd/_enable_cache-disable-via-header_access_log
T-2/var/log/httpd/_enable_cache-disable-via-header_backend_log
T-2/var/log/httpd/_enable_cache-disable-via-header_error_log
T-2/var/log/httpd/_enable_cache-https-only-false_access_log
T-2/var/log/httpd/_enable_cache-https-only-false_backend_log
T-2/var/log/httpd/_enable_cache-https-only-false_error_log
T-2/var/log/httpd/_enable_cache_access_log
T-2/var/log/httpd/_enable_cache_backend_log
T-2/var/log/httpd/_enable_cache_custom_domain_access_log
T-2/var/log/httpd/_enable_cache_custom_domain_backend_log
T-2/var/log/httpd/_enable_cache_custom_domain_error_log
T-2/var/log/httpd/_enable_cache_error_log
T-2/var/log/httpd/_enable_cache_server_alias_access_log
T-2/var/log/httpd/_enable_cache_server_alias_backend_log
T-2/var/log/httpd/_enable_cache_server_alias_error_log
T-2/var/log/httpd/_https-only_access_log
T-2/var/log/httpd/_https-only_backend_log
T-2/var/log/httpd/_https-only_error_log
T-2/var/log/httpd/_https-url-netloc-list_access_log
T-2/var/log/httpd/_https-url-netloc-list_backend_log
T-2/var/log/httpd/_https-url-netloc-list_error_log
T-2/var/log/httpd/_monitor-ipv4-test_access_log
T-2/var/log/httpd/_monitor-ipv4-test_error_log
T-2/var/log/httpd/_monitor-ipv6-test_access_log
T-2/var/log/httpd/_monitor-ipv6-test_error_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend-https-only_access_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend-https-only_backend_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend-https-only_error_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend_access_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend_backend_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend_error_log
T-2/var/log/httpd/_server-alias-duplicated_access_log
T-2/var/log/httpd/_server-alias-duplicated_backend_log
T-2/var/log/httpd/_server-alias-duplicated_error_log
T-2/var/log/httpd/_server-alias-empty_access_log
T-2/var/log/httpd/_server-alias-empty_backend_log
T-2/var/log/httpd/_server-alias-empty_error_log
T-2/var/log/httpd/_server-alias-wildcard_access_log
T-2/var/log/httpd/_server-alias-wildcard_backend_log
T-2/var/log/httpd/_server-alias-wildcard_error_log
T-2/var/log/httpd/_server-alias_access_log
T-2/var/log/httpd/_server-alias_backend_log
T-2/var/log/httpd/_server-alias_custom_domain-duplicated_access_log
T-2/var/log/httpd/_server-alias_custom_domain-duplicated_backend_log
T-2/var/log/httpd/_server-alias_custom_domain-duplicated_error_log
T-2/var/log/httpd/_server-alias_error_log
T-2/var/log/httpd/_ssl-proxy-verify-unverified_access_log
T-2/var/log/httpd/_ssl-proxy-verify-unverified_backend_log
T-2/var/log/httpd/_ssl-proxy-verify-unverified_error_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt-unverified_access_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt-unverified_backend_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt-unverified_error_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt_access_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt_backend_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt_error_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_access_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_backend_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_error_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_access_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_backend_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_error_log
T-2/var/log/httpd/_ssl_ca_crt_only_access_log
T-2/var/log/httpd/_ssl_ca_crt_only_backend_log
T-2/var/log/httpd/_ssl_ca_crt_only_error_log
T-2/var/log/httpd/_type-notebook_access_log
T-2/var/log/httpd/_type-notebook_backend_log
T-2/var/log/httpd/_type-notebook_error_log
T-2/var/log/httpd/_type-redirect-custom_domain_access_log
T-2/var/log/httpd/_type-redirect-custom_domain_error_log
T-2/var/log/httpd/_type-redirect_access_log
T-2/var/log/httpd/_type-redirect_error_log
T-2/var/log/httpd/_type-websocket-websocket-path-list-websocket-transparent-false_access_log
T-2/var/log/httpd/_type-websocket-websocket-path-list-websocket-transparent-false_backend_log
T-2/var/log/httpd/_type-websocket-websocket-path-list-websocket-transparent-false_error_log
T-2/var/log/httpd/_type-websocket-websocket-path-list_access_log
T-2/var/log/httpd/_type-websocket-websocket-path-list_backend_log
T-2/var/log/httpd/_type-websocket-websocket-path-list_error_log
T-2/var/log/httpd/_type-websocket-websocket-transparent-false_access_log
T-2/var/log/httpd/_type-websocket-websocket-transparent-false_backend_log
T-2/var/log/httpd/_type-websocket-websocket-transparent-false_error_log
T-2/var/log/httpd/_type-websocket_access_log
T-2/var/log/httpd/_type-websocket_backend_log
T-2/var/log/httpd/_type-websocket_error_log
T-2/var/log/httpd/_type-zope-default-path_access_log
T-2/var/log/httpd/_type-zope-default-path_backend_log
T-2/var/log/httpd/_type-zope-default-path_error_log
T-2/var/log/httpd/_type-zope-path_access_log
T-2/var/log/httpd/_type-zope-path_backend_log
T-2/var/log/httpd/_type-zope-path_error_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend-https-only_access_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend-https-only_backend_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend-https-only_error_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend_access_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend_backend_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend_error_log
T-2/var/log/httpd/_type-zope-virtualhostroot-http-port_access_log
T-2/var/log/httpd/_type-zope-virtualhostroot-http-port_backend_log
T-2/var/log/httpd/_type-zope-virtualhostroot-http-port_error_log
T-2/var/log/httpd/_type-zope-virtualhostroot-https-port_access_log
T-2/var/log/httpd/_type-zope-virtualhostroot-https-port_backend_log
T-2/var/log/httpd/_type-zope-virtualhostroot-https-port_error_log
T-2/var/log/httpd/_type-zope_access_log
T-2/var/log/httpd/_type-zope_backend_log
T-2/var/log/httpd/_type-zope_error_log
T-2/var/log/httpd/_url-netloc-list_access_log
T-2/var/log/httpd/_url-netloc-list_backend_log
T-2/var/log/httpd/_url-netloc-list_error_log
T-2/var/log/httpd/_url_https-url_access_log
T-2/var/log/httpd/_url_https-url_backend_log
T-2/var/log/httpd/_url_https-url_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlave.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000004174 14241130220 0042177 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-_monitor-ipv4-test-ipv4-packet-list-test.py
T-2/etc/plugin/check-_monitor-ipv6-test-ipv6-packet-list-test.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlave.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0041501 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlave.test_master_partition_state-CADDY.txt 0000664 0000000 0000000 00000001302 14241130220 0043421 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data SetEnvIf Origin "^http(s)?://(.+\.)?(monitor\.app\.officejs\.com)$" ORIGIN_DOMAIN=$0
Header always set Access-Control-Allow-Origin "%{ORIGIN_DOMAIN}e" env=ORIGIN_DOMAIN
Header always set Access-Control-Allow-Credentials "true" env=ORIGIN_DOMAIN
Header always set Access-Control-Allow-Methods "PROPFIND, PROPPATCH, COPY, MOVE, DELETE, MKCOL, LOCK, UNLOCK, PUT, GETLIB, VERSION-CONTROL, CHECKIN, CHECKOUT, UNCHECKOUT, REPORT, UPDATE, CANCELUPLOAD, HEAD, OPTIONS, GET, POST" env=ORIGIN_DOMAIN
Header always set Access-Control-Allow-Headers "Overwrite, Destination, Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, If-Modified-Since, X-File-Name, Cache-Control, Authorization" env=ORIGIN_DOMAIN
test.TestSlave.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0042266 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestSlaveCiphers.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0044314 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlaveCiphers.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001455 14241130220 0042777 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_default_ciphers_access_log
T-2/var/log/httpd/_default_ciphers_backend_log
T-2/var/log/httpd/_default_ciphers_error_log
T-2/var/log/httpd/_own_ciphers_access_log
T-2/var/log/httpd/_own_ciphers_backend_log
T-2/var/log/httpd/_own_ciphers_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlaveCiphers.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0043520 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlaveCiphers.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0043017 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlaveCiphers.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0043604 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestSlaveGlobalDisableHttp2.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0046325 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlaveGlobalDisableHttp2.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000021757 14241130220 0045017 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_Url_access_log
T-2/var/log/httpd/_Url_backend_log
T-2/var/log/httpd/_Url_error_log
T-2/var/log/httpd/_auth-to-backend-backend-ignore_access_log
T-2/var/log/httpd/_auth-to-backend-backend-ignore_backend_log
T-2/var/log/httpd/_auth-to-backend-backend-ignore_error_log
T-2/var/log/httpd/_auth-to-backend-not-configured_access_log
T-2/var/log/httpd/_auth-to-backend-not-configured_backend_log
T-2/var/log/httpd/_auth-to-backend-not-configured_error_log
T-2/var/log/httpd/_auth-to-backend_access_log
T-2/var/log/httpd/_auth-to-backend_backend_log
T-2/var/log/httpd/_auth-to-backend_error_log
T-2/var/log/httpd/_bad-backend_access_log
T-2/var/log/httpd/_bad-backend_backend_log
T-2/var/log/httpd/_bad-backend_error_log
T-2/var/log/httpd/_ciphers_access_log
T-2/var/log/httpd/_ciphers_error_log
T-2/var/log/httpd/_custom_domain_access_log
T-2/var/log/httpd/_custom_domain_backend_log
T-2/var/log/httpd/_custom_domain_error_log
T-2/var/log/httpd/_custom_domain_server_alias_access_log
T-2/var/log/httpd/_custom_domain_server_alias_backend_log
T-2/var/log/httpd/_custom_domain_server_alias_error_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_access_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_backend_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_error_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_access_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_backend_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_error_log
T-2/var/log/httpd/_custom_domain_wildcard_access_log
T-2/var/log/httpd/_custom_domain_wildcard_backend_log
T-2/var/log/httpd/_custom_domain_wildcard_error_log
T-2/var/log/httpd/_disabled-cookie-list_access_log
T-2/var/log/httpd/_disabled-cookie-list_backend_log
T-2/var/log/httpd/_disabled-cookie-list_error_log
T-2/var/log/httpd/_empty_access_log
T-2/var/log/httpd/_empty_error_log
T-2/var/log/httpd/_enable-http2-default_access_log
T-2/var/log/httpd/_enable-http2-default_backend_log
T-2/var/log/httpd/_enable-http2-default_error_log
T-2/var/log/httpd/_enable-http2-false_access_log
T-2/var/log/httpd/_enable-http2-false_backend_log
T-2/var/log/httpd/_enable-http2-false_error_log
T-2/var/log/httpd/_enable_cache-disable-no-cache-request_access_log
T-2/var/log/httpd/_enable_cache-disable-no-cache-request_backend_log
T-2/var/log/httpd/_enable_cache-disable-no-cache-request_error_log
T-2/var/log/httpd/_enable_cache-disable-via-header_access_log
T-2/var/log/httpd/_enable_cache-disable-via-header_backend_log
T-2/var/log/httpd/_enable_cache-disable-via-header_error_log
T-2/var/log/httpd/_enable_cache-https-only-false_access_log
T-2/var/log/httpd/_enable_cache-https-only-false_backend_log
T-2/var/log/httpd/_enable_cache-https-only-false_error_log
T-2/var/log/httpd/_enable_cache_access_log
T-2/var/log/httpd/_enable_cache_backend_log
T-2/var/log/httpd/_enable_cache_custom_domain_access_log
T-2/var/log/httpd/_enable_cache_custom_domain_backend_log
T-2/var/log/httpd/_enable_cache_custom_domain_error_log
T-2/var/log/httpd/_enable_cache_error_log
T-2/var/log/httpd/_enable_cache_server_alias_access_log
T-2/var/log/httpd/_enable_cache_server_alias_backend_log
T-2/var/log/httpd/_enable_cache_server_alias_error_log
T-2/var/log/httpd/_https-only_access_log
T-2/var/log/httpd/_https-only_backend_log
T-2/var/log/httpd/_https-only_error_log
T-2/var/log/httpd/_https-url-netloc-list_access_log
T-2/var/log/httpd/_https-url-netloc-list_backend_log
T-2/var/log/httpd/_https-url-netloc-list_error_log
T-2/var/log/httpd/_monitor-ipv4-test_access_log
T-2/var/log/httpd/_monitor-ipv4-test_error_log
T-2/var/log/httpd/_monitor-ipv6-test_access_log
T-2/var/log/httpd/_monitor-ipv6-test_error_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend-https-only_access_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend-https-only_backend_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend-https-only_error_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend_access_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend_backend_log
T-2/var/log/httpd/_prefer-gzip-encoding-to-backend_error_log
T-2/var/log/httpd/_server-alias-duplicated_access_log
T-2/var/log/httpd/_server-alias-duplicated_backend_log
T-2/var/log/httpd/_server-alias-duplicated_error_log
T-2/var/log/httpd/_server-alias-empty_access_log
T-2/var/log/httpd/_server-alias-empty_backend_log
T-2/var/log/httpd/_server-alias-empty_error_log
T-2/var/log/httpd/_server-alias-wildcard_access_log
T-2/var/log/httpd/_server-alias-wildcard_backend_log
T-2/var/log/httpd/_server-alias-wildcard_error_log
T-2/var/log/httpd/_server-alias_access_log
T-2/var/log/httpd/_server-alias_backend_log
T-2/var/log/httpd/_server-alias_custom_domain-duplicated_access_log
T-2/var/log/httpd/_server-alias_custom_domain-duplicated_backend_log
T-2/var/log/httpd/_server-alias_custom_domain-duplicated_error_log
T-2/var/log/httpd/_server-alias_error_log
T-2/var/log/httpd/_ssl-proxy-verify-unverified_access_log
T-2/var/log/httpd/_ssl-proxy-verify-unverified_backend_log
T-2/var/log/httpd/_ssl-proxy-verify-unverified_error_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt-unverified_access_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt-unverified_backend_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt-unverified_error_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt_access_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt_backend_log
T-2/var/log/httpd/_ssl-proxy-verify_ssl_proxy_ca_crt_error_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_access_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_backend_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_error_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_access_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_backend_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_error_log
T-2/var/log/httpd/_ssl_ca_crt_only_access_log
T-2/var/log/httpd/_ssl_ca_crt_only_backend_log
T-2/var/log/httpd/_ssl_ca_crt_only_error_log
T-2/var/log/httpd/_type-notebook_access_log
T-2/var/log/httpd/_type-notebook_backend_log
T-2/var/log/httpd/_type-notebook_error_log
T-2/var/log/httpd/_type-redirect-custom_domain_access_log
T-2/var/log/httpd/_type-redirect-custom_domain_error_log
T-2/var/log/httpd/_type-redirect_access_log
T-2/var/log/httpd/_type-redirect_error_log
T-2/var/log/httpd/_type-websocket-websocket-path-list-websocket-transparent-false_access_log
T-2/var/log/httpd/_type-websocket-websocket-path-list-websocket-transparent-false_backend_log
T-2/var/log/httpd/_type-websocket-websocket-path-list-websocket-transparent-false_error_log
T-2/var/log/httpd/_type-websocket-websocket-path-list_access_log
T-2/var/log/httpd/_type-websocket-websocket-path-list_backend_log
T-2/var/log/httpd/_type-websocket-websocket-path-list_error_log
T-2/var/log/httpd/_type-websocket-websocket-transparent-false_access_log
T-2/var/log/httpd/_type-websocket-websocket-transparent-false_backend_log
T-2/var/log/httpd/_type-websocket-websocket-transparent-false_error_log
T-2/var/log/httpd/_type-websocket_access_log
T-2/var/log/httpd/_type-websocket_backend_log
T-2/var/log/httpd/_type-websocket_error_log
T-2/var/log/httpd/_type-zope-default-path_access_log
T-2/var/log/httpd/_type-zope-default-path_backend_log
T-2/var/log/httpd/_type-zope-default-path_error_log
T-2/var/log/httpd/_type-zope-path_access_log
T-2/var/log/httpd/_type-zope-path_backend_log
T-2/var/log/httpd/_type-zope-path_error_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend-https-only_access_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend-https-only_backend_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend-https-only_error_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend_access_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend_backend_log
T-2/var/log/httpd/_type-zope-prefer-gzip-encoding-to-backend_error_log
T-2/var/log/httpd/_type-zope-virtualhostroot-http-port_access_log
T-2/var/log/httpd/_type-zope-virtualhostroot-http-port_backend_log
T-2/var/log/httpd/_type-zope-virtualhostroot-http-port_error_log
T-2/var/log/httpd/_type-zope-virtualhostroot-https-port_access_log
T-2/var/log/httpd/_type-zope-virtualhostroot-https-port_backend_log
T-2/var/log/httpd/_type-zope-virtualhostroot-https-port_error_log
T-2/var/log/httpd/_type-zope_access_log
T-2/var/log/httpd/_type-zope_backend_log
T-2/var/log/httpd/_type-zope_error_log
T-2/var/log/httpd/_url-netloc-list_access_log
T-2/var/log/httpd/_url-netloc-list_backend_log
T-2/var/log/httpd/_url-netloc-list_error_log
T-2/var/log/httpd/_url_https-url_access_log
T-2/var/log/httpd/_url_https-url_backend_log
T-2/var/log/httpd/_url_https-url_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlaveGlobalDisableHttp2.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000004174 14241130220 0045526 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-_monitor-ipv4-test-ipv4-packet-list-test.py
T-2/etc/plugin/check-_monitor-ipv6-test-ipv6-packet-list-test.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlaveGlobalDisableHttp2.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0045030 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlaveGlobalDisableHttp2.test_master_partition_state-CADDY.txt 0000664 0000000 0000000 00000001302 14241130220 0046750 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data SetEnvIf Origin "^http(s)?://(.+\.)?(monitor\.app\.officejs\.com)$" ORIGIN_DOMAIN=$0
Header always set Access-Control-Allow-Origin "%{ORIGIN_DOMAIN}e" env=ORIGIN_DOMAIN
Header always set Access-Control-Allow-Credentials "true" env=ORIGIN_DOMAIN
Header always set Access-Control-Allow-Methods "PROPFIND, PROPPATCH, COPY, MOVE, DELETE, MKCOL, LOCK, UNLOCK, PUT, GETLIB, VERSION-CONTROL, CHECKIN, CHECKOUT, UNCHECKOUT, REPORT, UPDATE, CANCELUPLOAD, HEAD, OPTIONS, GET, POST" env=ORIGIN_DOMAIN
Header always set Access-Control-Allow-Headers "Overwrite, Destination, Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, If-Modified-Since, X-File-Name, Cache-Control, Authorization" env=ORIGIN_DOMAIN
test.TestSlaveGlobalDisableHttp2.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0045615 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestSlaveHealthCheck.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0045062 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlaveHealthCheck.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000004645 14241130220 0043551 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_health-check-connect_access_log
T-2/var/log/httpd/_health-check-connect_backend_log
T-2/var/log/httpd/_health-check-connect_error_log
T-2/var/log/httpd/_health-check-custom_access_log
T-2/var/log/httpd/_health-check-custom_backend_log
T-2/var/log/httpd/_health-check-custom_error_log
T-2/var/log/httpd/_health-check-default_access_log
T-2/var/log/httpd/_health-check-default_backend_log
T-2/var/log/httpd/_health-check-default_error_log
T-2/var/log/httpd/_health-check-disabled_access_log
T-2/var/log/httpd/_health-check-disabled_backend_log
T-2/var/log/httpd/_health-check-disabled_error_log
T-2/var/log/httpd/_health-check-failover-url-auth-to-backend_access_log
T-2/var/log/httpd/_health-check-failover-url-auth-to-backend_backend_log
T-2/var/log/httpd/_health-check-failover-url-auth-to-backend_error_log
T-2/var/log/httpd/_health-check-failover-url-netloc-list_access_log
T-2/var/log/httpd/_health-check-failover-url-netloc-list_backend_log
T-2/var/log/httpd/_health-check-failover-url-netloc-list_error_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verified_access_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verified_backend_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verified_error_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verify-missing_access_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verify-missing_backend_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verify-missing_error_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verify-unverified_access_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verify-unverified_backend_log
T-2/var/log/httpd/_health-check-failover-url-ssl-proxy-verify-unverified_error_log
T-2/var/log/httpd/_health-check-failover-url_access_log
T-2/var/log/httpd/_health-check-failover-url_backend_log
T-2/var/log/httpd/_health-check-failover-url_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlaveHealthCheck.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0044266 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlaveHealthCheck.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0043565 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlaveHealthCheck.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0044352 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestSlaveHostHaproxyClash.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0046162 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlaveHostHaproxyClash.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001422 14241130220 0044637 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_wildcard_access_log
T-2/var/log/httpd/_wildcard_backend_log
T-2/var/log/httpd/_wildcard_error_log
T-2/var/log/httpd/_zspecific_access_log
T-2/var/log/httpd/_zspecific_backend_log
T-2/var/log/httpd/_zspecific_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlaveHostHaproxyClash.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0045366 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlaveHostHaproxyClash.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0044665 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlaveHostHaproxyClash.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0045452 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestSlaveSlapOSMasterCertificateCompatibility.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0052131 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlaveSlapOSMasterCertificateCompatibility.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000005241 14241130220 0050611 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_access_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_backend_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_error_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_access_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_backend_log
T-2/var/log/httpd/_custom_domain_ssl_crt_ssl_key_ssl_ca_crt_error_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_access_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_backend_log
T-2/var/log/httpd/_ssl_ca_crt_does_not_match_error_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_access_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_backend_log
T-2/var/log/httpd/_ssl_ca_crt_garbage_error_log
T-2/var/log/httpd/_ssl_from_master_access_log
T-2/var/log/httpd/_ssl_from_master_backend_log
T-2/var/log/httpd/_ssl_from_master_error_log
T-2/var/log/httpd/_ssl_from_master_kedifa_overrides_access_log
T-2/var/log/httpd/_ssl_from_master_kedifa_overrides_backend_log
T-2/var/log/httpd/_ssl_from_master_kedifa_overrides_error_log
T-2/var/log/httpd/_ssl_from_slave_access_log
T-2/var/log/httpd/_ssl_from_slave_backend_log
T-2/var/log/httpd/_ssl_from_slave_error_log
T-2/var/log/httpd/_ssl_from_slave_kedifa_overrides_access_log
T-2/var/log/httpd/_ssl_from_slave_kedifa_overrides_backend_log
T-2/var/log/httpd/_ssl_from_slave_kedifa_overrides_error_log
T-2/var/log/httpd/_type-notebook-ssl_from_master_access_log
T-2/var/log/httpd/_type-notebook-ssl_from_master_backend_log
T-2/var/log/httpd/_type-notebook-ssl_from_master_error_log
T-2/var/log/httpd/_type-notebook-ssl_from_master_kedifa_overrides_access_log
T-2/var/log/httpd/_type-notebook-ssl_from_master_kedifa_overrides_backend_log
T-2/var/log/httpd/_type-notebook-ssl_from_master_kedifa_overrides_error_log
T-2/var/log/httpd/_type-notebook-ssl_from_slave_access_log
T-2/var/log/httpd/_type-notebook-ssl_from_slave_backend_log
T-2/var/log/httpd/_type-notebook-ssl_from_slave_error_log
T-2/var/log/httpd/_type-notebook-ssl_from_slave_kedifa_overrides_access_log
T-2/var/log/httpd/_type-notebook-ssl_from_slave_kedifa_overrides_backend_log
T-2/var/log/httpd/_type-notebook-ssl_from_slave_kedifa_overrides_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlaveSlapOSMasterCertificateCompatibility.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0051335 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlaveSlapOSMasterCertificateCompatibility.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0050634 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlaveSlapOSMasterCertificateCompatibility.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0051421 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestSlaveSlapOSMasterCertificateCompatibilityOverrideMaster.test_file_list_etc_cron_d-CADDY.txt0000664 0000000 0000000 00000000644 14241130220 0055005 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlaveSlapOSMasterCertificateCompatibilityOverrideMaster.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001433 14241130220 0053464 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_ssl_from_master_kedifa_overrides_master_certificate_access_log
T-2/var/log/httpd/_ssl_from_master_kedifa_overrides_master_certificate_backend_log
T-2/var/log/httpd/_ssl_from_master_kedifa_overrides_master_certificate_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlaveSlapOSMasterCertificateCompatibilityOverrideMaster.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0054211 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlaveSlapOSMasterCertificateCompatibilityOverrideMaster.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0053510 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlaveSlapOSMasterCertificateCompatibilityOverrideMaster.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0054275 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
test.TestSlaveSlapOSMasterCertificateCompatibilityUpdate.test_file_list_etc_cron_d-CADDY.txt 0000664 0000000 0000000 00000000644 14241130220 0053274 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/cron.d/logrotate
T-0/etc/cron.d/monitor-configurator
T-0/etc/cron.d/monitor-globalstate
T-0/etc/cron.d/monitor_collect
T-1/etc/cron.d/logrotate
T-1/etc/cron.d/monitor-configurator
T-1/etc/cron.d/monitor-globalstate
T-1/etc/cron.d/monitor_collect
T-2/etc/cron.d/logrotate
T-2/etc/cron.d/monitor-configurator
T-2/etc/cron.d/monitor-globalstate
T-2/etc/cron.d/monitor_collect
T-2/etc/cron.d/trafficserver-logrotate
test.TestSlaveSlapOSMasterCertificateCompatibilityUpdate.test_file_list_log-CADDY.txt 0000664 0000000 0000000 00000001257 14241130220 0051757 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/log/monitor-httpd-access.log
T-0/var/log/monitor-httpd-error.log
T-0/var/log/slapgrid-T-0-error.log
T-1/var/log/expose-csr.log
T-1/var/log/kedifa.log
T-1/var/log/monitor-httpd-access.log
T-1/var/log/monitor-httpd-error.log
T-2/var/log/backend-haproxy.log
T-2/var/log/expose-csr.log
T-2/var/log/frontend-access.log
T-2/var/log/frontend-error.log
T-2/var/log/httpd/_ssl_from_master_access_log
T-2/var/log/httpd/_ssl_from_master_backend_log
T-2/var/log/httpd/_ssl_from_master_error_log
T-2/var/log/monitor-httpd-access.log
T-2/var/log/monitor-httpd-error.log
T-2/var/log/slave-introspection-access.log
T-2/var/log/slave-introspection-error.log
T-2/var/log/trafficserver/manager.log
test.TestSlaveSlapOSMasterCertificateCompatibilityUpdate.test_file_list_plugin-CADDY.txt 0000664 0000000 0000000 00000003772 14241130220 0052500 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/etc/plugin/__init__.py
T-0/etc/plugin/aibcc-sign-promise.py
T-0/etc/plugin/aibcc-user-caucase-updater.py
T-0/etc/plugin/aikc-sign-promise.py
T-0/etc/plugin/aikc-user-caucase-updater.py
T-0/etc/plugin/buildout-T-0-status.py
T-0/etc/plugin/caucased-backend-client.py
T-0/etc/plugin/check-backend-haproxy-statistic-url-caddy-frontend-1.py
T-0/etc/plugin/check-free-disk-space.py
T-0/etc/plugin/monitor-bootstrap-status.py
T-0/etc/plugin/monitor-http-frontend.py
T-0/etc/plugin/monitor-httpd-listening-on-tcp.py
T-0/etc/plugin/rejected-slave-publish-ip-port-listening.py
T-0/etc/plugin/rejected-slave.py
T-1/etc/plugin/__init__.py
T-1/etc/plugin/buildout-T-1-status.py
T-1/etc/plugin/caucased.py
T-1/etc/plugin/check-free-disk-space.py
T-1/etc/plugin/expose-csr-ip-port-listening.py
T-1/etc/plugin/kedifa-http-reply.py
T-1/etc/plugin/monitor-bootstrap-status.py
T-1/etc/plugin/monitor-http-frontend.py
T-1/etc/plugin/monitor-httpd-listening-on-tcp.py
T-1/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/__init__.py
T-2/etc/plugin/backend-client-caucase-updater.py
T-2/etc/plugin/backend-haproxy-configuration.py
T-2/etc/plugin/backend-haproxy-statistic-frontend.py
T-2/etc/plugin/backend_haproxy_http.py
T-2/etc/plugin/backend_haproxy_https.py
T-2/etc/plugin/buildout-T-2-status.py
T-2/etc/plugin/caddy_frontend_ipv4_http.py
T-2/etc/plugin/caddy_frontend_ipv4_https.py
T-2/etc/plugin/caddy_frontend_ipv6_http.py
T-2/etc/plugin/caddy_frontend_ipv6_https.py
T-2/etc/plugin/caucase-updater.py
T-2/etc/plugin/check-free-disk-space.py
T-2/etc/plugin/expose-csr-ip-port-listening.py
T-2/etc/plugin/frontend-caddy-configuration-promise.py
T-2/etc/plugin/monitor-bootstrap-status.py
T-2/etc/plugin/monitor-http-frontend.py
T-2/etc/plugin/monitor-httpd-listening-on-tcp.py
T-2/etc/plugin/promise-logrotate-setup.py
T-2/etc/plugin/re6st-connectivity.py
T-2/etc/plugin/slave-introspection-configuration.py
T-2/etc/plugin/slave_introspection_https.py
T-2/etc/plugin/trafficserver-cache-availability.py
T-2/etc/plugin/trafficserver-port-listening.py
test.TestSlaveSlapOSMasterCertificateCompatibilityUpdate.test_file_list_run-CADDY.txt 0000664 0000000 0000000 00000001065 14241130220 0051777 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0/var/run/monitor-httpd.pid
T-1/var/run/kedifa.pid
T-1/var/run/monitor-httpd.pid
T-2/var/run/backend-haproxy-rsyslogd.pid
T-2/var/run/backend-haproxy.pid
T-2/var/run/backend_haproxy_configuration_last_state
T-2/var/run/backend_haproxy_graceful_configuration_state_signature
T-2/var/run/bhlog.sck
T-2/var/run/graceful_configuration_state_signature
T-2/var/run/httpd.pid
T-2/var/run/monitor-httpd.pid
T-2/var/run/slave-introspection.pid
T-2/var/run/slave_introspection_configuration_last_state
T-2/var/run/slave_introspection_graceful_configuration_state_signature
test.TestSlaveSlapOSMasterCertificateCompatibilityUpdate.test_supervisor_state-CADDY.txt 0000664 0000000 0000000 00000003452 14241130220 0052564 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caddy-frontend/test/test_data T-0:aibcc-user-caucase-updater-on-watch RUNNING
T-0:aikc-user-caucase-updater-on-watch RUNNING
T-0:bootstrap-monitor EXITED
T-0:caucased-backend-client-{hash-generic}-on-watch RUNNING
T-0:certificate_authority-{hash-generic}-on-watch RUNNING
T-0:crond-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-{hash-generic}-on-watch RUNNING
T-0:monitor-httpd-graceful EXITED
T-0:rejected-slave-publish-{hash-rejected-slave-publish}-on-watch RUNNING
T-1:bootstrap-monitor EXITED
T-1:caucase-updater-on-watch RUNNING
T-1:caucased-{hash-generic}-on-watch RUNNING
T-1:certificate_authority-{hash-generic}-on-watch RUNNING
T-1:crond-{hash-generic}-on-watch RUNNING
T-1:expose-csr-{hash-generic}-on-watch RUNNING
T-1:kedifa-{hash-generic}-on-watch RUNNING
T-1:kedifa-reloader EXITED
T-1:monitor-httpd-{hash-generic}-on-watch RUNNING
T-1:monitor-httpd-graceful EXITED
T-2:6tunnel-11080-{hash-generic}-on-watch RUNNING
T-2:6tunnel-11443-{hash-generic}-on-watch RUNNING
T-2:backend-client-login-certificate-caucase-updater-on-watch RUNNING
T-2:backend-haproxy-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-rsyslogd-{hash-generic}-on-watch RUNNING
T-2:backend-haproxy-safe-graceful EXITED
T-2:bootstrap-monitor EXITED
T-2:certificate_authority-{hash-generic}-on-watch RUNNING
T-2:crond-{hash-generic}-on-watch RUNNING
T-2:expose-csr-{hash-generic}-on-watch RUNNING
T-2:frontend-caddy-safe-graceful EXITED
T-2:frontend_caddy-{hash-caddy-T-2}-on-watch RUNNING
T-2:kedifa-login-certificate-caucase-updater-on-watch RUNNING
T-2:kedifa-updater-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-{hash-generic}-on-watch RUNNING
T-2:monitor-httpd-graceful EXITED
T-2:slave-instrospection-nginx-{hash-generic}-on-watch RUNNING
T-2:slave-introspection-safe-graceful EXITED
T-2:trafficserver-{hash-generic}-on-watch RUNNING
T-2:trafficserver-reload EXITED
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/ 0000775 0000000 0000000 00000000000 14241130220 0025163 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/buildout.hash.cfg 0000664 0000000 0000000 00000001662 14241130220 0030422 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# But avoid directories, they are not portable.
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance-caucased]
filename = instance-caucased.cfg.jinja2
md5sum = 2e7c1d8c553b398dd68c875a9fa38cdb
[instance]
filename = instance.cfg.jinja2
md5sum = b4a50217c68233eb0e2922fd7606a6be
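As an illustration of the line-oriented format described in the header comments above, a re-generation pass could be sketched as follows. This is a hypothetical sketch only: the actual re-generation script is not part of this file, and the function and regex names here are assumptions made for illustration.

import hashlib
import os
import re

# "filename = <path>" lines name the tracked file (copied verbatim);
# "md5sum = <hash>" lines are the only ones re-generated.
FILENAME_RE = re.compile(r'^\s*filename\s*=\s*(\S+)\s*$')
HASH_RE = re.compile(r'^\s*md5sum\s*=')

def regenerate(cfg_path):
    base = os.path.dirname(cfg_path)
    target = None
    output = []
    with open(cfg_path) as f:
        for line in f:
            m = FILENAME_RE.match(line)
            if m:
                # Remember which file the next hash line refers to.
                target = os.path.join(base, m.group(1))
            elif HASH_RE.match(line) and target:
                with open(target, 'rb') as tracked:
                    line = 'md5sum = %s\n' % hashlib.md5(
                        tracked.read()).hexdigest()
            output.append(line)  # comments, sections, other lines: verbatim
    with open(cfg_path, 'w') as f:
        f.writelines(output)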
instance-caucase-input-schema.json 0000664 0000000 0000000 00000002446 14241130220 0033606 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase {
"type": "object",
"$schema": "http://json-schema.org/draft-04/schema",
"extends": "./schema-definitions.json#",
"title": "Input Parameters",
"properties": {
"base-port": {
"title": "Base TCP port",
"description": "If 80, caucase will also listen on 443. Otherwise, caucase will listen on port and port + 1.",
"type": "integer",
"default": 8009
},
"external-url": {
"$comment": "(deprecated)"
},
"service-auto-approve-amount": {
"title": "Number of service certificate requests to automatically approve",
"description": "Once that number has been reached, a user must validate further requests. Renewals do not count toward this number. Cannot be changed once set.",
"type": "integer",
"default": 0
},
"user-auto-approve-amount": {
"title": "Number of user certificate requests to automatically approve",
"description": "Once that number has been reached, a user must validate further requests. Renewals do not count toward this number. Cannot be changed once set.",
"type": "integer",
"default": 1
},
"key-length": {
"title": "Key length",
"description": "Size, in bits, of the SSL key generated to authenticate users.",
"default": 2048,
"type": "integer"
}
}
}
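To make the parameter contract above concrete, here is a hedged example of validating a request parameter dict against this schema with the third-party jsonschema library; the library choice and the literal file path are assumptions of the example, not something the schema itself prescribes.

import json

import jsonschema  # assumed available; any draft-04 validator would do

with open('instance-caucase-input-schema.json') as f:
    schema = json.load(f)

parameters = {
    'base-port': 8009,  # not 80, so caucase listens on 8009 and 8010
    'service-auto-approve-amount': 5,
    'user-auto-approve-amount': 1,
    'key-length': 2048,
}
jsonschema.validate(parameters, schema)  # raises ValidationError on mismatch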
instance-caucase-output-schema.json 0000664 0000000 0000000 00000000361 14241130220 0034001 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by Caucase instantiation",
"properties": {
"url": {
"description": "Caucase URL",
"type": "string"
}
},
"type": "object"
}
instance-caucased.cfg.jinja2 0000664 0000000 0000000 00000001703 14241130220 0032314 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase {% import "caucase" as caucase with context %}
{% set netloc = '[' ~ (ipv6_set | list)[0] ~ ']:' ~ slapparameter_dict.get('base-port', 8009) -%}
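{# netloc pairs the partition's first IPv6 address with the requested base port (default 8009), e.g. "[2001:db8::1]:8009"; the [publish] section below exposes "http://" ~ netloc as the service URL. #}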
[directory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
service-on-watch = ${:etc}/service
srv = ${buildout:directory}/srv
tmp = ${buildout:directory}/tmp
{{ caucase.caucased(
prefix='caucased',
buildout_bin_directory=bin_directory,
caucased_path='${directory:service-on-watch}/caucased',
data_dir='${directory:srv}/caucased',
netloc=netloc,
tmp='${directory:tmp}',
service_auto_approve_count=slapparameter_dict.get('service-auto-approve-amount', 0),
user_auto_approve_count=slapparameter_dict.get('user-auto-approve-amount', 1),
key_len=slapparameter_dict.get('key-length', 2048),
) }}
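{# Assumed reading: the caucased() macro imported above expands into the [caucased] and [caucased-promise] sections that [buildout] parts references below. #}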
[publish]
recipe = slapos.cookbook:publish.serialised
url = {{ dumps('http://' ~ netloc) }}
[buildout]
parts =
publish
caucased
caucased-promise
extends = {{ template_monitor }}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/instance.cfg.jinja2 0000664 0000000 0000000 00000002206 14241130220 0030624 0 ustar 00root root 0000000 0000000 [buildout]
parts = switch-softwaretype
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[context]
bin-directory = {{ dumps(bin_directory) }}
caucase-jinja2-library = {{ dumps(caucase_jinja2_library) }}
instance-caucased = {{ dumps(instance_caucased) }}
[caucased]
recipe = slapos.recipe.template:jinja2
url = ${context:instance-caucased}
output = ${buildout:parts-directory}/instance-caucased.cfg
monitor = {{ template_monitor }}
context =
key ipv6_set slap-configuration:ipv6
key slapparameter_dict slap-configuration:configuration
key bin_directory context:bin-directory
key template_monitor :monitor
import-list =
file caucase context:caucase-jinja2-library
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = caucased:output
# XXX: When will this name finally go away?
RootSoftwareInstance = ${:default}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/software.cfg 0000664 0000000 0000000 00000001336 14241130220 0027501 0 ustar 00root root 0000000 0000000 [buildout]
extends =
buildout.hash.cfg
../../stack/caucase/buildout.cfg
parts +=
instance
caucase-eggs
[instance-caucased]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:filename}
[instance]
recipe = slapos.recipe.template:jinja2
# XXX: "template.cfg" is hardcoded in instanciation recipe
output = ${buildout:directory}/template.cfg
url = ${:_profile_base_location_}/${:filename}
context =
key bin_directory buildout:bin-directory
key develop_eggs_directory buildout:develop-eggs-directory
key eggs_directory buildout:eggs-directory
key caucase_jinja2_library caucase-jinja2-library:target
key instance_caucased instance-caucased:target
key template_monitor monitor2-template:output
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/software.cfg.json 0000664 0000000 0000000 00000000550 14241130220 0030446 0 ustar 00root root 0000000 0000000 {
"name": "Caucase",
"description": "Caucase certificate authority",
"serialisation": "json-in-xml",
"software-type": {
"default": {
"title": "Default",
"description": "Single caucase instance",
"request": "instance-caucase-input-schema.json",
"response": "instance-caucase-output-schema.json",
"index": 0
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/test/ 0000775 0000000 0000000 00000000000 14241130220 0026142 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/test/setup.py 0000664 0000000 0000000 00000003476 14241130220 0027666 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.caucase'
setup(
name=name,
version=version,
description="Test for SlapOS' Caucase",
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'supervisor',
'pexpect',
'requests',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/caucase/test/test.py 0000664 0000000 0000000 00000005040 14241130220 0027472 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import unicode_literals
import json
import os
import requests
import httplib
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class TestCaucase(SlapOSInstanceTestCase):
def deserializeConnectionParameter(self):
return json.loads(
self.computer_partition.getConnectionParameter('_'))
def test(self):
connection_parameter_dict = self.deserializeConnectionParameter()
self.assertEqual(
connection_parameter_dict,
{'url': 'http://[%s]:8009' % (self._ipv6_address,)}
)
result = requests.get(connection_parameter_dict['url'])
self.assertEqual(result.status_code, httplib.OK)
self.assertEqual(
result.json(),
{
'_links': {
'self': {
'href': 'http://[%s]:8009' % (self._ipv6_address,)
},
'getCAUHAL': {
'href': 'http://[%s]:8009//cau' % (self._ipv6_address,),
'title': 'cau'
},
'getCASHAL': {
'href': 'http://[%s]:8009//cas' % (self._ipv6_address,),
'title': 'cas'
}
}
}
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/ 0000775 0000000 0000000 00000000000 14241130220 0025402 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/buildout.hash.cfg 0000664 0000000 0000000 00000001611 14241130220 0030633 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template-cloudooo]
filename = instance.cfg.in
md5sum = d1e4d7306c39f2ebc64d0407860d4301
[template-cloudooo-instance]
filename = instance-cloudooo.cfg.in
md5sum = 90299c1dbdc5f983613794a8e9a7bc9d
instance-cloudooo-input-schema.json 0000664 0000000 0000000 00000001556 14241130220 0034245 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"extends": "./schema-definitions.json#",
"properties": {
"tcpv4-port": {
"allOf": [
{
"$ref": "#/definitions/tcpv4port"
},
{
"description": "Start allocating ports at this value, going upward",
"default": 23000
}
]
},
"backend-count": {
"description": "Number of backend cloudooo instances",
"default": 1,
"type": "integer"
},
"timeout": {
"description": "Configure apache with this timeout",
"type": "integer"
},
"mimetype-entry-addition": {
"description": "The list of entry to add to the cloudooo mimetype registry. Each entry should on one line which format is: \" \"",
"type": "string"
}
}
}
instance-cloudooo-output-schema.json 0000664 0000000 0000000 00000000414 14241130220 0034436 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by Cloudooo instanciation",
"properties": {
"url": {
"description": "Conversion service access information",
"type": "string"
}
},
"type": "object"
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/instance-cloudooo.cfg.in 0000664 0000000 0000000 00000024436 14241130220 0032126 0 ustar 00root root 0000000 0000000 {% set ipv4 = (ipv4_set | list)[0] -%}
{% if ipv6_set %}{% set ipv6 = (ipv6_set | list)[0] %}{% endif -%}
{% set instance_parameter_dict = parameter_dict['instance-parameter-dict'] -%}
{% macro assert(x) %}{{ ("",)[not x] }}{% endmacro -%}
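{# assert(x) renders to nothing when x is truthy; otherwise "not x" is True (i.e. 1) and indexing the one-element tuple ("",) raises IndexError, aborting the template rendering. -#}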
{% set publish_url_name = instance_parameter_dict.get('publish-url-name') or 'url' -%}
{% set next_port = instance_parameter_dict['port'] | int -%}
{% if instance_parameter_dict.get('port-parameter-name') -%}
{% set next_port = slapparameter_dict.get(instance_parameter_dict['port-parameter-name'], next_port) | int -%}
{% endif -%}
{% do assert(next_port > 0) -%}
{% set next_port = itertools.count(next_port).next -%}
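{# next_port is now a callable port allocator: each next_port() call returns the next integer, via the bound .next method of itertools.count (a Python 2 idiom). -#}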
{% set backend_count = instance_parameter_dict['backend-count'] | int -%}
{% if instance_parameter_dict.get('backend-count-parameter-name') -%}
{% set backend_count = slapparameter_dict.get(instance_parameter_dict['backend-count-parameter-name'], backend_count) | int -%}
{% endif -%}
{% do assert(backend_count > 0) -%}
{% set timeout = instance_parameter_dict['timeout'] | int -%}
{% if instance_parameter_dict.get('timeout-parameter-name') -%}
{% set timeout = slapparameter_dict.get(instance_parameter_dict['timeout-parameter-name'], timeout) | int -%}
{% endif -%}
{% do assert(timeout > 0) -%}
{% set mimetype_entry_addition = instance_parameter_dict.get('mimetype-entry-addition', '') -%}
{% if instance_parameter_dict.get('mimetype-entry-addition-parameter-name') -%}
{% set mimetype_entry_addition = mimetype_entry_addition ~ "\n" ~ slapparameter_dict.get(instance_parameter_dict['mimetype-entry-addition-parameter-name'], '') -%}
{% endif -%}
{% set apache_port = next_port() -%}
{% set haproxy_port = next_port() -%}
{% set apache_ip_list = [ipv4] -%}
{% if ipv6_set -%}
{% do apache_ip_list.append('[' ~ ipv6 ~ ']') -%}
{% endif -%}
{% set apache_dict = {} -%}
{% do apache_dict.__setitem__(publish_url_name, (apache_port, "https", 'http://' ~ ipv4 ~ ':' ~ haproxy_port, False)) -%}
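{# apache_dict maps a publishable name to (public port, scheme, backend URL, flag) - the trailing False is presumably an SSL client authentication switch - and feeds [apache-conf-parameter-dict] and [publish] below. -#}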
{% set bin_directory = parameter_dict['buildout-bin-directory'] -%}
{% set section_list = [] -%}
{% set cloudooo_section_list = [] -%}
{% macro cloudooo(name) %}{% do cloudooo_section_list.append(name) %}{{ name }}{% endmacro -%}
{% macro simplefile(section_name, file_path, content, mode='') -%}
{% set content_section_name = section_name ~ '-content' -%}
[{{ content_section_name }}]
content = {{ dumps(content) }}
[{{ section_name }}]
recipe = slapos.recipe.template
output = {{ file_path }}
inline = {{ '${' + content_section_name + ':content}' }}
{%- endmacro %}
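{# simplefile() expands inline into two buildout sections: <section_name>-content, holding the dumps()-serialised content, and <section_name>, writing that content to file_path via slapos.recipe.template. -#}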
[buildout]
extends =
{{ parameter_dict['template-logrotate-base'] }}
{{ parameter_dict['template-monitor'] }}
parts =
monitor-base
publish
apache
apache-conf
apache-promise
apache-logrotate
cloudooo-test-runner
haproxy
xvfb-instance
wkhtmltopdf-on-xvfb
[apache]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/apache
command-line = "{{ parameter_dict['apache'] }}/bin/httpd" -f "${apache-conf:output}" -DFOREGROUND
[apache-conf]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_dict['template-apache-conf'] }}
output = ${directory:apache-conf}/apache.conf
context = section parameter_dict apache-conf-parameter-dict
[apache-conf-parameter-dict]
backend-list = {{ dumps(apache_dict.values()) }}
ip-list = {{ dumps(apache_ip_list) }}
pid-file = ${directory:run}/apache.pid
error-log = ${directory:log}/apache-error.log
access-log = ${directory:log}/apache-access.log
# Apache 2.4's default value (60 seconds) can be a bit too short
timeout = {{ timeout }}
# Basic SSL server configuration
cert = ${apache-ssl:cert}
key = ${apache-ssl:key}
cipher =
ssl-session-cache = ${directory:log}/apache-ssl-session-cache
[apache-promise]
# Check any apache port in ipv4, expect other ports and ipv6 to behave consistently
<= monitor-promise-base
promise = check_url_available
name = apache.py
config-url = https://{{ ipv4 }}:{{ apache_dict.values()[0][0] }}
# XXX cloudooo replies "400 Bad Request" for GET on / but what we want to check
# is that we don't have a "503 Service Unavailable" from apache or haproxy.
config-http-code = 400
[apache-conf-ssl]
cert = ${directory:apache-conf}/apache.crt
key = ${directory:apache-conf}/apache.pem
ca-cert = ${directory:apache-conf}/ca.crt
crl = ${directory:apache-conf}/crl.pem
[apache-ssl]
recipe = plone.recipe.command
command = "{{ parameter_dict['openssl'] }}/bin/openssl" req -newkey rsa -batch -new -x509 -days 3650 -nodes -keyout "${:key}" -out "${:cert}"
key = ${apache-conf-ssl:key}
cert = ${apache-conf-ssl:cert}
[apache-logrotate]
< = logrotate-entry-base
name = apache
log = ${apache-conf-parameter-dict:error-log} ${apache-conf-parameter-dict:access-log}
post = test ! -s ${apache-conf-parameter-dict:pid-file} || {{ bin_directory }}/slapos-kill --pidfile ${apache-conf-parameter-dict:pid-file} -s USR1
[publish]
recipe = slapos.cookbook:publish.serialised
{% for family_name, (apache_port, scheme, _, _) in apache_dict.items() -%}
{{ family_name ~ '-v6' }} = {% if ipv6_set %}{{ scheme ~ '://[' ~ ipv6 ~ ']:' ~ apache_port }}{% endif %}
{{ family_name }} = {{ scheme ~ '://' ~ ipv4 ~ ':' ~ apache_port }}
{% endfor -%}
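# Each family is published under two keys: '<family>' (IPv4 URL) and
# '<family>-v6' (IPv6 URL, left empty when the partition has no IPv6 address).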
[fontconfig-conf]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_dict['template-fonts-conf'] }}
output = ${directory:etc}/fonts.conf
context =
key cachedir directory:fontconfig-cache
key fonts :fonts
key includes :includes
fonts =
{% for font in parameter_dict['fonts'].splitlines() %}
{{ font }}
{% endfor%}
${directory:font}
includes =
{% for include in parameter_dict['fontconfig-includes'].splitlines() %}
{{ include }}
{% endfor%}
[cloudooo-base]
recipe = slapos.cookbook:generic.cloudooo
ip = {{ ipv4 }}
environment =
LD_LIBRARY_PATH = {{ parameter_dict['cairo'] }}/lib:{{ parameter_dict['cups'] }}/lib:{{ parameter_dict['cups'] }}/lib64:{{ parameter_dict['dbus'] }}/lib:{{ parameter_dict['dbus-glib'] }}/lib:{{ parameter_dict['file'] }}/lib:{{ parameter_dict['fontconfig'] }}/lib:{{ parameter_dict['freetype'] }}/lib:{{ parameter_dict['gcc'] }}/lib:{{ parameter_dict['gcc'] }}/lib64:{{ parameter_dict['glib'] }}/lib:{{ parameter_dict['glu'] }}/lib:{{ parameter_dict['libICE'] }}/lib:{{ parameter_dict['libSM'] }}/lib:{{ parameter_dict['libX11'] }}/lib:{{ parameter_dict['libXau'] }}/lib:{{ parameter_dict['libXdmcp'] }}/lib:{{ parameter_dict['libXext'] }}/lib:{{ parameter_dict['libXrender'] }}/lib:{{ parameter_dict['libexpat'] }}/lib:{{ parameter_dict['libffi'] }}/lib:{{ parameter_dict['libffi'] }}/lib64:{{ parameter_dict['libpng12'] }}/lib:{{ parameter_dict['libxcb'] }}/lib:{{ parameter_dict['mesa'] }}/lib:{{ parameter_dict['pixman'] }}/lib:{{ parameter_dict['xdamage'] }}/lib:{{ parameter_dict['xfixes'] }}/lib:{{ parameter_dict['zlib'] }}/lib
FONTCONFIG_FILE = ${fontconfig-conf:output}
PATH = ${binary-link:target-directory}
LANG = C.UTF-8
mimetype_entry_addition =
{% for entry in mimetype_entry_addition.splitlines() -%}
{{ " " ~ entry.strip() }}
{% endfor -%}
# Binary information
# cloudooo specific configuration
ooo-binary-path = {{ parameter_dict['libreoffice-bin'] }}/program
ooo-paster = {{ bin_directory }}/cloudooo_paster
ooo-uno-path = {{ parameter_dict['libreoffice-bin'] }}/basis-link/program
{% for index in range(1, backend_count + 1) -%}
{% set name = 'cloudooo-' ~ index -%}
[{{ cloudooo(name) }}]
< = cloudooo-base
port = {{ next_port() }}
openoffice-port = {{ next_port() }}
configuration-file = ${directory:etc}/{{ name }}.cfg
data-directory = ${directory:srv}/{{ name }}
wrapper = ${directory:services}/{{ name }}
{% endfor -%}
[haproxy]
recipe = slapos.cookbook:haproxy
name = cloudooo
conf-path = ${directory:etc}/haproxy.cfg
socket-path = ${directory:run}/haproxy.sock
ip = {{ ipv4 }}
port = {{ haproxy_port }}
maxconn = 1
wrapper-path = ${directory:services}/haproxy
binary-path = {{ parameter_dict['haproxy'] }}/sbin/haproxy
ctl-path = ${directory:bin}/haproxy-ctl
backend-list =
{%- for section_name in cloudooo_section_list %}
{{ "${" ~ section_name ~ ":ip}:${" ~ section_name ~ ":port}" }}
{%- endfor %}
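# Note: with maxconn = 1 above, haproxy presumably queues requests so that each
# cloudooo backend performs a single conversion at a time.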
[cloudooo-test-runner]
recipe = slapos.cookbook:cloudooo.test
prepend-path = ${buildout:bin-directory}
run-unit-test = ${buildout:bin-directory}/runUnitTest
run-test-suite = ${buildout:bin-directory}/runTestSuite
ooo-paster = ${cloudooo-1:ooo-paster}
configuration-file = ${cloudooo-1:configuration-file}
run-unit-test-binary = {{ bin_directory }}/runCloudoooUnitTest
run-test-suite-binary = {{ bin_directory }}/runCloudoooTestSuite
[binary-link]
recipe = slapos.cookbook:symbolic.link
target-directory = ${directory:bin}
link-binary =
{{ parameter_dict['coreutils'] }}/bin/basename
{{ parameter_dict['coreutils'] }}/bin/cat
{{ parameter_dict['coreutils'] }}/bin/cp
{{ parameter_dict['coreutils'] }}/bin/ls
{{ parameter_dict['coreutils'] }}/bin/tr
{{ parameter_dict['coreutils'] }}/bin/uname
{{ parameter_dict['coreutils'] }}/bin/dirname
# wrapper recipe needs the head command
{{ parameter_dict['coreutils'] }}/bin/head
{{ parameter_dict['imagemagick'] }}/bin/convert
{{ parameter_dict['imagemagick'] }}/bin/identify
{{ parameter_dict['poppler'] }}/bin/pdfinfo
{{ parameter_dict['poppler'] }}/bin/pdftotext
{{ parameter_dict['poppler'] }}/bin/pdftohtml
{{ parameter_dict['onlyoffice-core'] }}/bin/x2t
# The remaining parts are candidates for being moved into some generic stack.
[directory]
recipe = slapos.cookbook:mkdirectory
apache-conf = ${:etc}/apache
bin = ${buildout:directory}/bin
ca-dir = ${buildout:directory}/srv/ssl
certs = ${:ca-dir}/certs
crl = ${:ca-dir}/crl
etc = ${buildout:directory}/etc
font = ${:srv}/font
fontconfig-cache = ${buildout:directory}/.fontconfig
log = ${:var}/log
newcerts = ${:ca-dir}/newcerts
private = ${:ca-dir}/private
requests = ${:ca-dir}/requests
run = ${:var}/run
services = ${:etc}/run
srv = ${buildout:directory}/srv
var = ${buildout:directory}/var
framebuffer = ${:srv}/framebuffer
[xvfb-instance]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/${:_buildout_section_name_}
command-line =
{{ parameter_dict["xserver"] }}/bin/Xvfb
${:display}
-screen 0 1024x768x24
-fbdir ${directory:framebuffer}
environment =
XORG_LOCK_DIR=${:lock-dir}
display = :0
lock-dir = ${directory:run}
[wkhtmltopdf-on-xvfb]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:bin}/wkhtmltopdf
environment =
DISPLAY=${xvfb-instance:display}
XORG_LOCK_DIR=${xvfb-instance:lock-dir}
command-line = {{ parameter_dict['wkhtmltopdf'] }}/wkhtmltopdf --use-xserver
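# wkhtmltopdf runs with --use-xserver, so it renders through an X display;
# DISPLAY points it at the headless Xvfb service declared above.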
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/instance.cfg.in 0000664 0000000 0000000 00000002732 14241130220 0030300 0 ustar 00root root 0000000 0000000 [buildout]
parts =
switch-softwaretype
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
output = ${buildout:parts-directory}/${:_buildout_section_name_}/${:filename}
extra-context =
context =
key ipv6_set slap-configuration:ipv6
key eggs_directory buildout:eggs-directory
key develop_eggs_directory buildout:develop-eggs-directory
key slapparameter_dict slap-configuration:configuration
${:extra-context}
[cloudooo-dynamic-template-parameter-dict]
{% for key, value in dynamic_template_cloudooo_instance_parameter_dict.items() -%}
{{ key }} = {{ dumps(value) }}
{% endfor -%}
instance-parameter-dict = {{ dumps(cloudooo_parameter_dict) }}
[cloudooo-dynamic-template]
< = jinja2-template-base
url = {{ template_cloudooo_instance }}
filename = instance-cloudooo.cfg
extensions = jinja2.ext.do
extra-context =
section parameter_dict cloudooo-dynamic-template-parameter-dict
key ipv4_set slap-configuration:ipv4
import itertools itertools
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = cloudooo-dynamic-template:output
RootSoftwareInstance = ${:default}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/software-common.cfg 0000664 0000000 0000000 00000006005 14241130220 0031204 0 ustar 00root root 0000000 0000000 [buildout]
extends =
buildout.hash.cfg
../../stack/cloudooo.cfg
../../stack/logrotate/buildout.cfg
../../stack/monitor/buildout.cfg
../../component/defaults.cfg
parts =
${cloudooo-buildout:parts}
[cloudooo-buildout]
parts =
${stack-cloudooo-buildout:parts}
# Local development
cloudooo-develop
slapos-cookbook
[slap-parameters]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[template-cloudooo]
< = template-cloudooo-base
recipe = slapos.recipe.template:jinja2
# XXX: "template.cfg" is hardcoded in instanciation recipe
output = ${buildout:directory}/template.cfg
url = ${:_profile_base_location_}/${:filename}
[template-cloudooo-base]
context =
key develop_eggs_directory buildout:develop-eggs-directory
key eggs_directory buildout:eggs-directory
key template_cloudooo_instance template-cloudooo-instance:target
section dynamic_template_cloudooo_instance_parameter_dict dynamic-template-cloudooo-instance-parameter-dict
section cloudooo_parameter_dict cloudooo-software-parameter-dict
[dynamic-template-cloudooo-instance-parameter-dict]
apache = ${apache:location}
buildout-bin-directory = ${buildout:bin-directory}
cairo = ${cairo:location}
coreutils = ${coreutils:location}
cups = ${cups:location}
dash = ${dash:location}
dbus = ${dbus:location}
dbus-glib = ${dbus-glib:location}
file = ${file:location}
fontconfig = ${fontconfig:location}
template-fonts-conf = ${template-fonts-conf:output}
fonts =
${android-fonts:location}
${ipa-fonts:location}
${ipaex-fonts:location}
${liberation-fonts:location}
${ocrb-fonts:location}
${dejavu-fonts:location}
${libreoffice-bin:location}/share/fonts/
fontconfig-includes =
${fontconfig:location}/etc/fonts/conf.d
freetype = ${freetype:location}
gcc = ${gcc:prefix}
glib = ${glib:location}
glu = ${glu:location}
haproxy = ${haproxy:location}
imagemagick = ${imagemagick:location}
libICE = ${libICE:location}
libSM = ${libSM:location}
libX11 = ${libX11:location}
libXau = ${libXau:location}
libXdmcp = ${libXdmcp:location}
libXext = ${libXext:location}
libXrender = ${libXrender:location}
libexpat = ${libexpat:location}
libffi = ${libffi:location}
libpng12 = ${libpng12:location}
libreoffice-bin = ${libreoffice-bin:location}
libxcb = ${libxcb:location}
mesa = ${mesa:location}
openssl = ${openssl:location}
onlyoffice-core = ${onlyoffice-core:location}
poppler = ${poppler:location}
pixman = ${pixman:location}
wkhtmltopdf = ${wkhtmltopdf:location}
xdamage = ${xdamage:location}
xfixes = ${xfixes:location}
xserver = ${xserver:location}
zlib = ${zlib:location}
template-apache-conf = ${template-apache-backend-conf:target}
template-logrotate-base = ${template-logrotate-base:output}
template-monitor = ${monitor2-template:output}
[template-cloudooo-instance]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:filename}
[versions]
argparse = 1.4.0
pyPdf = 1.13
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/software.cfg 0000664 0000000 0000000 00000001004 14241130220 0027710 0 ustar 00root root 0000000 0000000 [buildout]
extends =
software-common.cfg
parts =
${cloudooo-buildout:parts}
template-cloudooo
[cloudooo-software-parameter-dict]
publish-url-name = cloudooo
port-parameter-name = tcpv4-port
port = 8000
backend-count-parameter-name = backend-count
backend-count = 1
timeout-parameter-name = timeout
# timeout in seconds
timeout = 600
ssl-dict-parameter-name = ssl
#ssl-dict =
mimetype-entry-addition-parameter-name = mimetype-entry-addition
#mimetype-entry-addition =
# text/html application/pdf wkhtmltopdf
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/software.cfg.json 0000664 0000000 0000000 00000000513 14241130220 0030664 0 ustar 00root root 0000000 0000000 {
"name": "cloudooo",
"description": "Clusterised cloudooo",
"serialisation": "xml",
"software-type": {
"default": {
"title": "Default",
"description": "Cloudooo",
"request": "instance-cloudooo-input-schema.json",
"response": "instance-cloudooo-output-schema.json",
"index": 0
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/test/ 0000775 0000000 0000000 00000000000 14241130220 0026361 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/test/README.md 0000664 0000000 0000000 00000000044 14241130220 0027636 0 ustar 00root root 0000000 0000000 Tests for Cloudooo software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/test/setup.py 0000664 0000000 0000000 00000003752 14241130220 0030102 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.cloudooo'
with open("README.md") as f:
long_description = f.read()
setup(name=name,
version=version,
description="Test for SlapOS' cloudooo",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.cookbook',
'slapos.libnetworkcache',
'requests',
'six',
'PyPDF2',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cloudooo/test/test.py 0000664 0000000 0000000 00000025330 14241130220 0027715 0 ustar 00root root 0000000 0000000 ##############################################################################
# coding: utf-8
#
# Copyright (c) 2020 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import codecs
import csv
import multiprocessing
import os
import json
import six.moves.xmlrpc_client as xmlrpclib
import six.moves.urllib.parse as urllib_parse
import ssl
import base64
import io
import requests
import PyPDF2
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, _CloudOooTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class CloudOooTestCase(_CloudOooTestCase):
# Cloudooo needs a lot of time before being available.
instance_max_retry = 30
def setUp(self):
self.url = json.loads(
self.computer_partition.getConnectionParameterDict()["_"])['cloudooo']
# XXX ignore certificate errors
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
self.server = xmlrpclib.ServerProxy(
self.url,
context=ssl_context,
allow_none=True,
)
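    # The proxy speaks cloudooo's XML-RPC API; tests below rely on
    # convertFile(data, source_format, destination_format, zip, refresh,
    # conversion_kw), where data and the returned document are base64-encoded
    # strings (signature stated here as an assumption, for reference).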
def normalizeFontName(font_name):
  # PDF font names may carry a 6-letter subset tag ("ABCDEF+Name") and keep
  # the leading slash of the PDF name object; strip both. The final return
  # is a defensive fallback for names matching neither pattern.
  if '+' in font_name:
    return font_name.split('+')[1]
  if font_name.startswith('/'):
    return font_name[1:]
  return font_name
def getReferencedFonts(pdf_file_reader):
"""Return fonts referenced in this pdf
"""
fonts = set()
def collectFonts(obj):
"""Recursively visit PDF objects and collect referenced fonts in `fonts`
"""
if hasattr(obj, 'keys'):
if '/BaseFont' in obj:
fonts.add(obj['/BaseFont'])
for k in obj.keys():
collectFonts(obj[k])
for page in pdf_file_reader.pages:
collectFonts(page.getObject()['/Resources'])
return {normalizeFontName(font) for font in fonts}
class HTMLtoPDFConversionFontTestMixin:
"""Mix-In class to test how fonts are selected during
HTML to PDF conversions.
This needs to be mixed with a test case defining:
* pdf_producer : the name of /Producer in PDF metadata
* expected_font_mapping : a mapping of resulting font name in pdf,
keyed by font-family in the input html
  * _convert_html_to_pdf: a method to convert html to pdf
"""
def _convert_html_to_pdf(self, src_html):
# type: (str) -> bytes
"""Convert the HTML source to pdf bytes.
"""
def test(self):
actual_font_mapping_mapping = {}
for font in self.expected_font_mapping:
      src_html = '''
      <html><style>p {{ font-family: "{font}"; }}</style><body>
      <p>the quick brown fox jumps over the lazy dog.</p>
      <p>THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.</p>
      </body></html>
      '''.format(**locals())
pdf_data = self._convert_html_to_pdf(src_html)
      pdf_reader = PyPDF2.PdfFileReader(io.BytesIO(pdf_data))
self.assertEqual(
self.pdf_producer,
pdf_reader.getDocumentInfo()['/Producer'])
fonts_in_pdf = getReferencedFonts(pdf_reader)
if len(fonts_in_pdf) == 1:
actual_font_mapping_mapping[font] = fonts_in_pdf.pop()
else:
actual_font_mapping_mapping[font] = fonts_in_pdf
self.maxDiff = None
self.assertEqual(self.expected_font_mapping, actual_font_mapping_mapping)
class TestWkhtmlToPDF(HTMLtoPDFConversionFontTestMixin, CloudOooTestCase):
__partition_reference__ = 'wk'
pdf_producer = 'Qt 4.8.7'
expected_font_mapping = {
'Arial': 'LiberationSans',
'Arial Black': 'LiberationSans',
'Avant Garde': 'LiberationSans',
'Bookman': 'LiberationSans',
'Carlito': 'Carlito',
'Comic Sans MS': 'LiberationSans',
'Courier New': 'LiberationSans',
'DejaVu Sans': 'DejaVuSans',
'DejaVu Sans Condensed': 'LiberationSans',
'DejaVu Sans ExtraLight': 'LiberationSans',
'DejaVu Sans Mono': 'DejaVuSansMono',
'DejaVu Serif': 'DejaVuSerif',
'DejaVu Serif Condensed': 'LiberationSans',
'Garamond': 'LiberationSans',
'Gentium Basic': 'GentiumBasic',
'Gentium Book Basic': 'GentiumBookBasic',
'Georgia': 'LiberationSans',
'Helvetica': 'LiberationSans',
'IPAex Gothic': 'LiberationSans',
'IPAex Mincho': 'LiberationSans',
'Impact': 'LiberationSans',
'Liberation Mono': 'LiberationMono',
'Liberation Sans': 'LiberationSans',
'Liberation Sans Narrow': 'LiberationSansNarrow',
'Liberation Serif': 'LiberationSerif',
'Linux LibertineG': 'LiberationSans',
'OpenSymbol': set(['DejaVuSans', 'OpenSymbol']),
'Palatino': 'LiberationSans',
'Roboto Black': 'LiberationSans',
'Roboto Condensed Light': 'LiberationSans',
'Roboto Condensed Regular': 'LiberationSans',
'Roboto Light': 'LiberationSans',
'Roboto Medium': 'LiberationSans',
'Roboto Thin': 'LiberationSans',
'Times New Roman': 'LiberationSans',
'Trebuchet MS': 'LiberationSans',
'Verdana': 'LiberationSans',
'ZZZdefault fonts when no match': 'LiberationSans'
}
def _convert_html_to_pdf(self, src_html):
return base64.decodestring(
self.server.convertFile(
base64.encodestring(src_html.encode()).decode(),
'html',
'pdf',
False,
False,
{
'encoding': 'utf-8'
},
).encode())
class TestLibreoffice(HTMLtoPDFConversionFontTestMixin, CloudOooTestCase):
__partition_reference__ = 'lo'
pdf_producer = 'LibreOffice 5.2'
expected_font_mapping = {
'Arial': 'LiberationSans',
'Arial Black': 'DejaVuSans',
'Avant Garde': 'DejaVuSans',
'Bookman': 'DejaVuSans',
'Carlito': 'Carlito',
'Comic Sans MS': 'DejaVuSans',
'Courier New': 'LiberationMono',
'DejaVu Sans': 'DejaVuSans',
'DejaVu Sans Condensed': 'DejaVuSansCondensed',
'DejaVu Sans ExtraLight': 'DejaVuSans',
'DejaVu Sans Mono': 'DejaVuSansMono',
'DejaVu Serif': 'DejaVuSerif',
'DejaVu Serif Condensed': 'DejaVuSerifCondensed',
'Garamond': 'DejaVuSerif',
'Gentium Basic': 'GentiumBasic',
'Gentium Book Basic': 'GentiumBookBasic',
'Georgia': 'DejaVuSerif',
'Helvetica': 'LiberationSans',
'IPAex Gothic': 'IPAexGothic',
'IPAex Mincho': 'IPAexMincho',
'Impact': 'DejaVuSans',
'Liberation Mono': 'LiberationMono',
'Liberation Sans': 'LiberationSans',
'Liberation Sans Narrow': 'LiberationSansNarrow',
'Liberation Serif': 'LiberationSerif',
'Linux LibertineG': 'LinuxLibertineG',
'OpenSymbol': 'OpenSymbol',
'Palatino': 'DejaVuSerif',
'Roboto Black': 'Roboto-Black',
'Roboto Condensed Light': 'RobotoCondensed-Light',
'Roboto Condensed Regular': 'DejaVuSans',
'Roboto Light': 'Roboto-Light',
'Roboto Medium': 'Roboto-Medium',
'Roboto Thin': 'Roboto-Thin',
'Times New Roman': 'LiberationSerif',
'Trebuchet MS': 'DejaVuSans',
'Verdana': 'DejaVuSans',
'ZZZdefault fonts when no match': 'DejaVuSans'
}
def _convert_html_to_pdf(self, src_html):
return base64.decodestring(
self.server.convertFile(
base64.encodestring(src_html.encode()).decode(),
'html',
'pdf',
).encode())
class TestLibreOfficeTextConversion(CloudOooTestCase):
__partition_reference__ = 'txt'
def test_html_to_text(self):
self.assertEqual(
base64.decodestring(
self.server.convertFile(
base64.encodestring(
u'héhé'.encode('utf-8')).decode(),
'html',
'txt',
).encode()),
codecs.BOM_UTF8 + b'h\xc3\xa9h\xc3\xa9\n',
)
class TestLibreOfficeCluster(CloudOooTestCase):
__partition_reference__ = 'lc'
@classmethod
def getInstanceParameterDict(cls):
return {'backend-count': 4}
def test_multiple_conversions(self):
    # make this function global so that it can be pickled and used by multiprocessing
global _convert_html_to_text
def _convert_html_to_text(src_html):
return base64.decodestring(
self.server.convertFile(
base64.encodestring(src_html.encode()).decode(),
'html',
'txt',
).encode())
pool = multiprocessing.Pool(5)
# TODO py3: use with pool
converted = pool.map(_convert_html_to_text,
['hello'] * 100)
pool.terminate()
pool.join()
self.assertEqual(converted, [codecs.BOM_UTF8 + b'hello\n'] * 100)
# haproxy stats are exposed
res = requests.get(
urllib_parse.urljoin(self.url, '/haproxy;csv'),
verify=False,
stream=True,
)
reader = csv.DictReader(res.raw)
line_list = list(reader)
# requests have been balanced
total_hrsp_2xx = {
line['svname']: int(line['hrsp_2xx'])
for line in line_list
}
self.assertEqual(total_hrsp_2xx['FRONTEND'], 100)
self.assertEqual(total_hrsp_2xx['BACKEND'], 100)
for backend in 'cloudooo_1', 'cloudooo_2', 'cloudooo_3', 'cloudooo_4':
# ideally there should be 25% of requests on each backend, because we use
    # round-robin scheduling, but some backends can take longer to start,
    # so we are tolerant here and just check that each backend processes
    # at least 15% of requests.
self.assertGreater(total_hrsp_2xx[backend], 15)
# no errors
total_eresp = {
line['svname']: int(line['eresp'] or 0)
for line in line_list
}
self.assertEqual(
total_eresp, {
'FRONTEND': 0,
'cloudooo_1': 0,
'cloudooo_2': 0,
'cloudooo_3': 0,
'cloudooo_4': 0,
'BACKEND': 0,
})
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cythonplus-dev/ 0000775 0000000 0000000 00000000000 14241130220 0026543 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cythonplus-dev/buildout.hash.cfg 0000664 0000000 0000000 00000000272 14241130220 0031776 0 ustar 00root root 0000000 0000000 [instance.cfg]
filename = instance.cfg.in
md5sum = 156dcfedd4b5fcbeefb3ac05a062edba
[runTestSuite.in]
_update_hash_filename_ = runTestSuite.in
md5sum = 21a8a202b14475707c414056ba393b3d
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cythonplus-dev/instance.cfg.in 0000664 0000000 0000000 00000002530 14241130220 0031435 0 ustar 00root root 0000000 0000000 [buildout]
parts =
publish-env-path
runTestSuite
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
[publish-env-path]
recipe = slapos.cookbook:publish
readme = Source the script to set up the environment.
script = ${cythonplus_env.sh:output}
repository = ${cythonplus-repository:repository}
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
[directory]
recipe = slapos.cookbook:mkdirectory
bin = $${buildout:directory}/bin
tmp = $${buildout:directory}/tmp
[cythonplus-repository]
recipe = slapos.recipe.build:gitclone
repository = ${cythonplus-repository:location}
git-executable = ${git:location}/bin/git
shared = true
[runTestSuite]
recipe = slapos.recipe.template:jinja2
output = $${directory:bin}/$${:_buildout_section_name_}
url = ${runTestSuite.in:target}
context =
key tmpdir directory:tmp
key slapparameter_dict slap-configuration:configuration
key cythonplus_repository cythonplus-repository:location
raw runTestSuite_interpreter ${runTestSuite_interpreter:bin-directory}/${runTestSuite_interpreter:interpreter}
raw cythonplus_env_sh ${cythonplus_env.sh:output}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cythonplus-dev/runTestSuite.in 0000664 0000000 0000000 00000007645 14241130220 0031565 0 ustar 00root root 0000000 0000000 #!{{ runTestSuite_interpreter }}
"""
Script to run the Cython test suite using Nexedi's test node framework.
"""
import argparse, json, os, subprocess, sys, traceback
from time import gmtime, strftime, time
from unittest import TextTestResult
from erp5.util import taskdistribution, testsuite
os.environ['TEMP'] = {{ repr(tmpdir) }}
command = """. {{ cythonplus_env_sh }}
make all test
"""
def parseTestStdOut(data):
"""
Parse output of Cython testrunner script.
"""
data = data.rsplit(b'\n' + TextTestResult.separator2.encode() + b'\n', 1)[1]
status_dict = {}
search = testsuite.TestSuite.RUN_RE.search(data)
if search:
groupdict = search.groupdict()
status_dict.update(duration=float(groupdict['seconds']),
test_count=int(groupdict['all_tests']))
search = testsuite.TestSuite.STATUS_RE.search(data)
if search:
groupdict = search.groupdict()
status_dict.update(
error_count=int(groupdict['errors'] or 0),
failure_count=int(groupdict['failures'] or 0)
+int(groupdict['unexpected_successes'] or 0),
skip_count=int(groupdict['skips'] or 0)
+int(groupdict['expected_failures'] or 0))
return status_dict
class DummyTestResult:
class DummyTestResultLine:
def stop(self, **kw):
with open(self.name + '.json', 'w') as f:
json.dump(kw, f)
done = 0
def __init__(self, test_name_list):
self.test_name_list = test_name_list
def start(self):
test_result_line = self.DummyTestResultLine()
try:
test_result_line.name = self.test_name_list[self.done]
except IndexError:
return
self.done += 1
return test_result_line
def main():
parser = argparse.ArgumentParser(description='Run a test suite.')
parser.add_argument('--test_suite', help='The test suite name')
parser.add_argument('--test_suite_title', help='The test suite title')
parser.add_argument('--test_node_title', help='The test node title')
parser.add_argument('--project_title', help='The project title')
parser.add_argument('--revision', help='The revision to test',
default='dummy_revision')
parser.add_argument('--node_quantity', help='ignored', type=int)
parser.add_argument('--master_url',
help='The Url of Master controling many suites')
args = parser.parse_args()
test_suite_title = args.test_suite_title or args.test_suite
test_name_list = 'cython',
if args.master_url:
tool = taskdistribution.TaskDistributor(portal_url = args.master_url)
test_result = tool.createTestResult(args.revision,
test_name_list,
args.test_node_title,
test_title=test_suite_title,
project_title=args.project_title)
if test_result is None:
return
else:
test_result = DummyTestResult(test_name_list)
  # run the cython test suite
while 1:
test_result_line = test_result.start()
if not test_result_line:
break
try:
with open(os.devnull) as stdin:
p = subprocess.Popen(command, shell=True, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd={{ repr(cythonplus_repository) }})
except Exception:
end = time()
stderr = traceback.format_exc()
status_dict = {}
sys.stderr.write(stderr)
else:
stdout, stderr = p.communicate()
end = time()
os.write(1, stdout)
os.write(2, stderr)
status_dict = parseTestStdOut(stderr)
if str is not bytes:
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
status_dict['stdout'] = stdout
# report status back to Nexedi ERP5
test_result_line.stop(
command=command,
date=strftime("%Y/%m/%d %H:%M:%S", gmtime(end)),
stderr=stderr,
**status_dict)
if __name__ == "__main__":
main()
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/cythonplus-dev/software.cfg 0000664 0000000 0000000 00000001024 14241130220 0031053 0 ustar 00root root 0000000 0000000 [buildout]
extends =
buildout.hash.cfg
../../stack/slapos.cfg
../../component/cythonplus/buildout.cfg
parts =
slapos-cookbook
instance.cfg
[instance.cfg]
recipe = slapos.recipe.template
output = ${buildout:directory}/${:_buildout_section_name_}
url = ${:_profile_base_location_}/${:filename}
[runTestSuite.in]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
[runTestSuite_interpreter]
recipe = zc.recipe.egg
eggs = erp5.util
interpreter = ${:_buildout_section_name_}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/django-dev/ 0000775 0000000 0000000 00000000000 14241130220 0025575 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/django-dev/software.cfg 0000664 0000000 0000000 00000001241 14241130220 0030106 0 ustar 00root root 0000000 0000000 [buildout]
extends =
../../stack/slapos.cfg
../../component/macros/virtual-env.cfg
parts =
instance
slapos-cookbook
[python]
part = python3
[django-env]
<= virtual-env-base
location = ${buildout:directory}/activate
eggs = Django
[instance]
recipe = slapos.recipe.template
output = ${buildout:directory}/instance.cfg
inline =
[buildout]
parts = publish
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
[publish]
recipe = slapos.cookbook:publish
activate-script = ${django-env:location}
[versions]
Django = 3.2.12
sqlparse = 0.4.2
pytz = 2021.3
asgiref = 3.3.2
typing-extensions = 4.1.1:whl
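# Usage sketch (an assumption, not part of this profile): source the path
# published as 'activate-script' in a shell, after which
# python -c "import django; print(django.get_version())" runs against the
# Django version pinned above.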
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/ 0000775 0000000 0000000 00000000000 14241130220 0024647 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/buildout.hash.cfg 0000664 0000000 0000000 00000000115 14241130220 0030076 0 ustar 00root root 0000000 0000000 [instance]
filename = instance.cfg
md5sum = a4e19280bc672cc98e0fef241c8439ba
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/instance.cfg 0000664 0000000 0000000 00000004430 14241130220 0027135 0 ustar 00root root 0000000 0000000 [buildout]
parts =
dream_simulation
dream_platform
dream_test_suite
dream_interpreter
grunt_watch
publish-connection-parameter
dream-platform-url-available
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
extends = ${monitor-template:output}
# parameters
[instance-parameter]
recipe = slapos.cookbook:slapconfiguration
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
[dream_platform_parameter]
port = 18080
host = $${instance-parameter:ipv6-random}
url = http://[$${:host}]:$${:port}
# interpreter
[dream_interpreter]
recipe = slapos.cookbook:wrapper
command-line = ${buildout:bin-directory}/dream_interpreter
wrapper-path = $${buildout:bin-directory}/dream_interpreter
# service
[dream_platform]
recipe = slapos.cookbook:wrapper
command-line = ${buildout:bin-directory}/dream_platform --debug --host $${dream_platform_parameter:host} --port $${dream_platform_parameter:port} --log $${directory:log}/dream_platform.log
wrapper-path = $${directory:service}/dream_platform
[dream-platform-url-available]
<= monitor-promise-base
promise = check_url_available
name = $${:_buildout_section_name_}.py
config-url = $${dream_platform_parameter:url}
[grunt_watch]
recipe = slapos.cookbook:wrapper
command-line = bash -c 'cd ${dream-repository.git:location}; PATH=${nodejs:location}/bin/:$PATH ${dream-repository.git:location}/node_modules/grunt-cli/bin/grunt watch -f > $${directory:log}/grunt.log'
wrapper-path = $${directory:service}/dream_grunt_watch
# CLI
[dream_simulation]
recipe = slapos.cookbook:wrapper
command-line = ${buildout:bin-directory}/dream_simulation
wrapper-path = $${directory:script}/dream_simulation
[dream_test_suite]
recipe = slapos.cookbook:wrapper
command-line = ${dream_testrunner:script}
wrapper-path = $${directory:script}/dream_test_suite
[directory]
recipe = slapos.cookbook:mkdirectory
home = $${buildout:directory}
etc = $${:home}/etc
var = $${:home}/var
script = $${:etc}/run/
service = $${:etc}/service
promise = $${:etc}/promise/
log = $${:var}/log
[publish-connection-parameter]
recipe = slapos.cookbook:publishurl
url = $${dream_platform_parameter:url}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/software.cfg 0000664 0000000 0000000 00000002217 14241130220 0027164 0 ustar 00root root 0000000 0000000 [buildout]
versions = versions
extends =
../../stack/slapos.cfg
../../stack/nodejs.cfg
../../component/manpy/buildout.cfg
../../stack/monitor/buildout.cfg
./buildout.hash.cfg
parts =
slapos-cookbook
manpy
dream_testrunner
npm_install
instance
[gcc]
# Always build GCC for Fortran (see openblas).
max_version = 0
[instance]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/instance.cfg
[dream_testrunner]
recipe = zc.recipe.testrunner
eggs = dream
script = dream_testrunner
initialization =
${manpy:initialization}
[nodejs]
<= nodejs-8.9.4
[npm_install]
recipe = plone.recipe.command
stop-on-error = true
command =
cd ${dream-repository.git:location}
PATH=${git:location}/bin:${nodejs:location}/bin:$PATH
npm install .
update_command = ${:command}
[versions]
rpy2 = 2.4.0
pydot = 1.0.28
xlrd = 0.9.3
xlwt = 0.7.5
scipy = 0.13.3
simpy = 3.0.5
zope.dottedname = 4.1.0
tablib = 0.10.0
mysqlclient = 1.3.12
# indirect dependencies
cp.recipe.cmd = 0.5
plone.recipe.command = 1.1
zope.exceptions = 4.0.7
zope.testing = 4.1.3
zc.recipe.testrunner = 2.0.0
zope.testrunner = 4.4.6
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/software_testnode.cfg 0000664 0000000 0000000 00000000477 14241130220 0031077 0 ustar 00root root 0000000 0000000 [buildout]
extends =
software.cfg
# nodejs' installation script does not support directory structures as deep as
# the ones we can get when installing a testnode in a webrunner. Since we do
# not need nodejs for the simulation executor, we simply disable these sections.
[nodejs]
recipe =
location =
[npm_install]
recipe =
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/test/ 0000775 0000000 0000000 00000000000 14241130220 0025626 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/test/README.md 0000664 0000000 0000000 00000000041 14241130220 0027100 0 ustar 00root root 0000000 0000000 Tests for DREAM software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/test/setup.py 0000664 0000000 0000000 00000003704 14241130220 0027344 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.dream'
with open("README.md") as f:
long_description = f.read()
setup(name=name,
version=version,
description="Test for SlapOS' DREAM",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.cookbook',
'slapos.libnetworkcache',
'requests'
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/dream/test/test.py 0000664 0000000 0000000 00000003467 14241130220 0027171 0 ustar 00root root 0000000 0000000 ##############################################################################
# coding: utf-8
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, DREAMTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class TestHTTPAccess(DREAMTestCase):
def setUp(self):
self.url = self.computer_partition.getConnectionParameterDict()["url"]
def test(self):
self.assertEqual(
requests.codes.ok,
requests.get(self.url, verify=False).status_code
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/ 0000775 0000000 0000000 00000000000 14241130220 0024432 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/README.rst 0000664 0000000 0000000 00000007310 14241130220 0026122 0 ustar 00root root 0000000 0000000 Available ``software-type`` values
==================================
- ``default``
Recommended for development and production use. Automatic creation of
erp5-site.
Notes
=====
This software release is not intended to be accessed directly, but through a
front-end instance which is expected to contain the RewriteRules_ (or
equivalent) needed to relocate Zope's urls via its VirtualHostMonster_. See the
``frontend`` erp5 instance parameter.
By default, ERP5 connects to the public cloudooo at https://cloudooo.erp5.net/.
See the ``cloudooo`` Software Release to set up a cloudooo cluster if necessary.
Replication
===========
Replication allows setting up an ERP5 instance whose data follows another
instance.
Relations between ERP5 instances in a replication graph depend on what is
supported by individual data managers (ex: a neo cluster can replicate from a
neo cluster which itself replicates from a 3rd).
Replication lag constraints (aka sync/async replication) depends on individual
data managers (ex: neo replication between clusters is always asynchronous).
Ignoring replication lag, replicated data can be strictly identical (ex: a
replicated ZODB or SQL database will contain the same data as upstream), or
may imply some remapping (ex: replicating Zope logs from an instance with 2
zope families of 2 partitions of 2 zopes each to an instance with a single
zope total).
Data whose replication is supported
-----------------------------------
- neo database
Data whose replication will eventually be supported
---------------------------------------------------
- mariadb database
- zope ``zope-*-access.log`` and ``zope-*-Z2.log``
- ``mariadb-slow.log``
Data whose replication is not planned
-------------------------------------
- zeo: use neo instead
Setting up replication
----------------------
In addition to your usual parameter set, you need to provide the following parameters::
{
"zope-partition-dict": {}, So no zope is instantiated
"zodb": [
{
"storage-dict": {
"upstream-masters": ..., As published by to-become upstream ERP5 instance as "neo-masters"
},
"type": "neo", The only ZODB type supporting replication
...
}
...
]
...
}
Port ranges
===========
This software release assigns the following port ranges by default:
==================== ==========
Partition type Port range
==================== ==========
memcached-persistent 2000-2009
memcached-volatile 2010-2019
smtp 2025-2029
neo (admin, master) 2050-2052
mariadb 2099
zeo 2100-2149
balancer 2150-2199
zope 2200-*
jupyter 8888
caucase 8890,8891
==================== ==========
Non-zope partitions are unique in an ERP5 cluster, so you shouldn't have to
care about them as a user (but a Software Release developer needs to know
them).
Zope partitions should be assigned port ranges starting at 2200, incrementing
by some value which depends on how many zope processes you want per partition
(see the ``port-base`` parameter in ``zope-partition-dict``).
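
For example, two zope partitions of 5 processes each, bound to the same
address, could be laid out as follows (an illustrative sketch, family and
partition names are arbitrary)::

  "zope-partition-dict": {
    "backoffice": {"instance-count": 5, "port-base": 2200},
    "web-service": {"instance-count": 5, "port-base": 2300}
  }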
Notes to the Software Release developer: These ranges are not strictly
defined. Not every port is actually used, so one may reduce already-assigned
ranges if needed (ex: memcached partitions actually use fewer ports). There
should be enough room for evolution (as between the smtp and mariadb types). It
is important not to allocate any port above 2200, as users may have assigned
ports to their zope processes.
.. _RewriteRules: http://httpd.apache.org/docs/current/en/mod/mod_rewrite.html#rewriterule
.. _VirtualHostMonster: http://docs.zope.org/zope2/zope2book/VirtualHosting.html instance-erp5-input-schema.json 0000664 0000000 0000000 00000057014 14241130220 0032325 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5 {
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Parameters to instantiate ERP5",
"type": "object",
"additionalProperties": false,
"definitions": {
"routing-rule-list": {
"description": "Maps the path received in requests to given zope path. Rules are applied in the order they are given. This requires the path received from the outside world (typically: frontend) to have its root correspond to Zope's root (for frontend: 'path' parameter must be empty), with the customary VirtualHostMonster construct (for frontend: 'type' must be 'zope').",
"type": "array",
"default": [
[
"/",
"/"
]
],
"items": {
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": [
{
"title": "External path",
"description": "Path as received from the outside world, based on VirtualHostRoot element.",
"type": "string"
},
{
"title": "Internal path",
"description": "Zope path, based on Zope root object, the external path should correspond to. '%(site-id)s' is replaced by the site-id value, and '%%' replaced by '%'.",
"type": "string"
}
]
}
},
"tcpv4port": {
"$ref": "./schemas-definitions.json#/tcpv4port"
}
},
"properties": {
"sla-dict": {
"description": "Where to request instances. Each key is a query string for criterions (e.g. \"computer_guid=foo\"), and each value is a list of partition references (note: Zope partitions reference must be prefixed with \"zope-\").",
"additionalProperties": {
"type": "array",
"items": {
"type": "string"
},
"uniqueItems": true
},
"type": "object"
},
"site-id": {
"description": "ERP5Site object's id. An empty value disables automatic site creation.",
"default": "erp5",
"type": "string"
},
"bt5": {
"description": "Business Template to install at automatic site creation. By default, all configurators are installed.",
"type": "string"
},
"id-store-interval": {
"description": "Set Store Interval of default SQL Non Continuous Increasing Id Generator at automatic site creation. If unset, the value from the erp5_core Business Template is not touched.",
"type": "integer"
},
"timezone": {
"description": "Zope's timezone. Possible values are determined by host's libc, and typically come from a separate package (tzdata, ...)",
"default": "UTC",
"type": "string"
},
"deadlock-debugger-password": {
"description": "Password for /manage_debug_threads",
"type": "string"
},
"inituser-login": {
"description": "Login of the initial/rescue user",
"default": "zope",
"type": "string"
},
"inituser-password": {
"description": "Password of the initial/rescue user",
"type": "string"
},
"developer-list": {
"description": "List of logins which should get the Developer role (required to modify portal_components' content), defaulting to inituser-login's value",
"items": {
"pattern": "^\\S+$",
"type": "string"
},
"uniqueItems": true,
"type": "array"
},
"activity-timeout": {
"description": "How long a CMFActivity-initiated transaction may last, in seconds",
"default": null,
"type": [
"number",
"null"
]
},
"publisher-timeout": {
"description": "How long a publisher-initiated transaction may last, in seconds",
"default": null,
"type": [
"number",
"null"
]
},
"family-override": {
"description": "Family-wide options, possibly overriding global options",
"default": {},
"patternProperties": {
".*": {
"default": {},
"properties": {
"webdav": {
"description": "Serve webdav queries, implies timerserver-interval=0 (disabled)",
"default": false,
"type": "boolean"
},
"activity-timeout": {
"description": "Override global activity timeout",
"type": [
"number",
"null"
]
},
"publisher-timeout": {
"description": "Override global publisher timeout",
"type": [
"number",
"null"
]
}
},
"type": "object"
}
},
"type": "object"
},
"hostalias-dict": {
"description": "Hostname-to-domain-name mapping",
"default": {},
"additionalProperties": {
"description": "A hostname to which current entry will resolve",
"type": "string"
},
"type": "object"
},
"hosts-dict": {
"description": "Host entries to be used in addition to and/or overriding auto-generated ones (erp5-catalog-0, erp5-cloudooo, erp5-memcached-persistent, erp5-memcached-volatile and erp5-smtp)",
"patternProperties": {
".*": {
"description": "An IP or domain name to which current entry will resolve",
"type": "string"
}
},
"type": "object"
},
"frontend": {
"description": "Front-end slave instance request parameters",
"properties": {
"software-url": {
"description": "Front-end's software type. If this parameter is empty, no front-end instance is requested. Else, sla-dict must specify 'frontend' which is a special value matching all frontends (e.g. {\"instance_guid=bar\": [\"frontend\"]}).",
"default": "",
"type": "string",
"format": "uri"
},
"domain": {
"description": "The domain name to request front-end to respond as.",
"default": "",
"type": "string"
},
"software-type": {
"description": "Request a front-end slave instance of this software type.",
"default": "RootSoftwareInstance",
"type": "string"
},
"virtualhostroot-http-port": {
"description": "Front-end slave http port. Port where http requests to frontend will be redirected.",
"default": 80,
"type": "integer"
},
"virtualhostroot-https-port": {
"description": "Front-end slave https port. Port where https requests to frontend will be redirected.",
"default": 443,
"type": "integer"
}
},
"type": "object"
},
"wsgi": {
"description": "If set to true, Zope is run as a WSGI application, instead of using the Medusa HTTP server.",
"type": "boolean",
"default": true
},
"zope-partition-dict": {
"description": "Zope layout definition",
"default": {
"1": {}
},
"patternProperties": {
".*": {
"additionalProperties": false,
"properties": {
"family": {
"description": "The family this partition is part of. For example: 'public', 'admin', 'backoffice', 'web-service'... Each family gets its own balancer entry. It has no special meaning for the system.",
"default": "default",
"type": "string"
},
"instance-count": {
"description": "Number of Zopes to setup on this partition",
"default": 1,
"type": "integer"
},
"thread-amount": {
"description": "Number of worker threads for each created Zope process",
"default": 4,
"type": "integer"
},
"timerserver-interval": {
"description": "Timerserver tick period, in seconds, or 0 to disable",
"default": 1,
"type": "number"
},
"private-dev-shm": {
"description": "Size of private /dev/shm for wendelin.core. If sysctl kernel.unprivileged_userns_clone exists, it must be set to 1.",
"type": "string"
},
"ssl-authentication": {
"title": "Enable SSL Client authentication on this zope instance.",
"description": "If set to true, will set SSL Client verification to required on apache VirtualHost which allow to access this zope instance.",
"type": "boolean",
"default": false
},
"longrequest-logger-interval": {
"description": "Period, in seconds, with which LongRequestLogger polls worker thread stack traces, or -1 to disable",
"default": -1,
"type": "integer"
},
"longrequest-logger-timeout": {
"description": "Transaction duration after which LongRequestLogger will start logging its stack trace, in seconds",
"default": 1,
"type": "integer"
},
"large-file-threshold": {
"description": "Requests bigger than this size get saved into a temporary file instead of being read completely into memory, in bytes",
"default": "10MB",
"type": "string"
},
"port-base": {
"allOf": [
{
"$ref": "#/definitions/tcpv4port"
},
{
"description": "Start allocating ports at this value. Useful if one needs to make several partitions share the same port range (ie, several partitions bound to a single address)",
"default": 2200
}
]
}
},
"type": "object"
}
},
"type": "object"
},
"kumofs": {
"description": "Persistent memcached service",
"allOf": [
{
"$ref": "./instance-kumofs-schema.json"
},
{
"properties": {
"tcpv4-port": {
"default": 2000
}
}
}
],
"type": "object"
},
"memcached": {
"description": "Volatile memcached service",
"allOf": [
{
"$ref": "./instance-kumofs-schema.json"
},
{
"properties": {
"tcpv4-port": {
"default": 2010
}
}
}
],
"type": "object"
},
"cloudooo-url": {
"description": "Format conversion service URL",
"pattern": "^https?://",
"type": "string",
"format": "uri"
},
"cloudooo-retry-count": {
"description": "Define retry count for cloudooo in network error case in test",
"type": "integer",
"default": 2
},
"smtp": {
"description": "Mail queuing and relay service",
"allOf": [
{
"$ref": "./instance-smtp-schema.json"
},
{
"properties": {
"tcpv4-port": {
"default": 2010
}
}
}
],
"type": "object"
},
"mariadb": {
"description": "Relational database service",
"allOf": [
{
"$ref": "./instance-mariadb-schema.json"
},
{
"properties": {
"tcpv4-port": {
"default": 2099
}
}
}
],
"type": "object"
},
"zodb-zeo": {
"description": "Common settings ZEO servers",
"properties": {
"tcpv4-port": {
"allOf": [
{
"$ref": "#/definitions/tcpv4port"
},
{
"description": "Start allocating ports at this value, going upward"
}
]
},
"backup-periodicity": {
"description": "When to backup, specified in the same format as for systemd.time(7) calendar events (years & seconds not supported, DoW & DoM can not be combined). Enter 'never' to disable backups.",
"default": "daily",
"type": "string"
},
"tidstorage-repozo-path": {
"description": "Directory for backup timestamp and tidstorage status files.",
"default": "~/srv/backup/tidstorage",
"type": "string"
}
},
"type": "object"
},
"zodb": {
"description": "Zope Object DataBase mountpoints. See https://github.com/zopefoundation/ZODB/blob/4/src/ZODB/component.xml for extra options.",
"items": {
"required": [
"type"
],
"properties": {
"name": {
"description": "Database name",
"default": "main",
"type": "string"
},
"mount-point": {
"description": "Mount point",
"default": "/",
"type": "string"
},
"storage-dict": {
"description": "Storage configuration. For NEO, 'logfile' is automatically set (see https://lab.nexedi.com/nexedi/neoppod/blob/master/neo/client/component.xml for other settings).",
"properties": {
"ssl": {
"description": "For external NEO. Pass false if you want to disable SSL or pass custom values for ca/cert/key.",
"default": true,
"type": "boolean"
}
},
"patternProperties": {
".!$": {
"$ref": "#/properties/zodb/items/patternProperties/.!$"
}
},
"additionalProperties": {
"$ref": "#/properties/zodb/items/additionalProperties"
},
"type": "object"
},
"type": true,
"server": true
},
"oneOf": [
{
"title": "zeo",
"properties": {
"type": {
"description": "Storage type",
"const": "zeo"
},
"server": {
"description": "Instantiate a server. If missing, 'storage-dict' must contain the necessary properties to mount the ZODB. The partition reference is 'zodb'.",
"$ref": "./instance-zeo-schema.json"
}
}
},
{
"title": "neo",
"properties": {
"type": {
"description": "Storage type",
"const": "neo"
},
"server": {
"description": "Instantiate a server. If missing, 'storage-dict' must contain the necessary properties to mount the ZODB. Partitions references are 'neo-0', 'neo-1', ...",
"$ref": "../neoppod/instance-neo-input-schema.json#/definitions/neo-cluster"
}
}
}
],
"patternProperties": {
".!$": {
"description": "Override with the value of the first item whose zope id matches against the pattern.",
"items": {
"items": [
{
"description": "Override pattern (Python regular expression).",
"type": "string"
},
{
"description": "Override value (parameter for matching nodes).",
"type": [
"integer",
"string"
]
}
],
"type": "array"
},
"type": "array"
}
},
"additionalProperties": {
"type": [
"integer",
"string"
]
},
"type": "object"
},
"type": "array"
},
"jupyter": {
"description": "Jupyter subinstance parameters",
"properties": {
"enable": {
"description": "Whether to enable creation of associated Jupyter subinstance",
"default": false,
"type": "boolean"
},
"zope-family": {
"description": "Zope family to connect Jupyter to by default",
"default": "",
"type": "string"
}
},
"type": "object"
},
"wcfs": {
"description": "Parameters for wendelin.core filesystem",
"properties": {
"enable": {
"description": "Whether to enable WCFS filesystem and use it to access ZBigArray/ZBigFile data. In WCFS mode wendelin.core clients (Zope/ERP5 processes) share in-RAM cache for in-ZODB data without duplicating it for every client. This cache sharing does not affect correctness as isolation property is continued to be provided to every client.",
"default": false,
"type": "boolean"
}
}
},
"wendelin-core-zblk-fmt": {
"description": "In wendelin.core there are 2 formats for storing data, so called ZBlk0 and ZBlk1. See https://lab.nexedi.com/nexedi/wendelin.core/blob/2e5e1d3d/bigfile/file_zodb.py#L19 for more details.",
"default": "",
"type": "string"
},
"caucase": {
"description": "Caucase certificate authority parameters",
"properties": {
"url": {
"title": "Caucase URL",
"description": "URL of existing caucase instance to use. If empty, a new caucase instance will be deployed. If not empty, other properties in this section will be ignored.",
"default": "",
"type": "string",
"format": "uri"
}
},
"additionalProperties": {
"$ref": "../caucase/instance-caucase-input-schema.json"
},
"type": "object"
},
"test-runner": {
"description": "Test runner parameters.",
"properties": {
"enabled": {
"description": "Generate helper scripts to run test suite.",
"default": true,
"type": "boolean"
},
"node-count": {
"description": "Number of tests this instance can execute in parallel. This must be at least equal to the number of nodes configured on testnode running the test",
"default": 3,
"type": "integer"
},
"extra-database-count": {
"description": "Number of extra databases this instance tests will need.",
"default": 3,
"type": "integer"
},
"selenium": {
"default": {
"target": "firefox"
},
"examples": [
{
"target": "selenium-server",
"server-url": "https://selenium.example.com",
"desired-capabilities": {
"browserName": "firefox",
"version": "68.0.2esr",
"acceptInsecureCerts": true
}
},
{
"target": "selenium-server",
"server-url": "https://selenium.example.com",
"desired-capabilities": {
"browserName": "chrome",
"version": "91.0.4472.101"
}
}
],
"oneOf": [
{
"type": "object",
"title": "Selenium Server",
"description": "Configuration for Selenium server",
"additionalProperties": false,
"required": [
"desired-capabilities",
"server-url",
"target"
],
"properties": {
"target": {
"description": "Target system",
"type": "string",
"const": "selenium-server",
"default": "selenium-server"
},
"server-url": {
"description": "URL of the selenium server",
"type": "string",
"format": "uri"
},
"verify-server-certificate": {
"description": "Verify the SSL/TLS certificate of the selenium server when using HTTPS",
"type": "boolean",
"default": true
},
"server-ca-certificate": {
"description": "PEM encoded bundle of CA certificates to verify the SSL/TLS certificate of the selenium server when using HTTPS",
"type": "string",
"default": "Root certificates from http://certifi.io/en/latest/"
},
"desired-capabilities": {
"description": "Desired browser capabilities",
"required": [
"browserName"
],
"type": "object",
"properties": {
"browserName": {
"description": "Name of the browser being used",
"type": "string",
"examples": [
"firefox",
"chrome",
"safari"
]
},
"version": {
"description": "The browser version",
"type": "string"
}
}
}
}
},
{
"type": "object",
"title": "Firefox",
"description": "Configuration for using firefox running as a sub-process",
"additionalProperties": false,
"properties": {
"target": {
"description": "Target system",
"const": "firefox",
"type": "string",
"default": "firefox"
}
}
}
]
},
"random-activity-priority": {
"type": "string",
"title": "Random Activity Priority",
"description": "Control `random_activity_priority` argument of test runner. Can be set to an empty string to automatically generate a seed for each test."
}
},
"type": "object"
},
"balancer": {
"description": "HTTP(S) load balancer proxy parameters",
"properties": {
"path-routing-list": {
"$ref": "#/definitions/routing-rule-list",
"title": "Global path routing rules"
},
"family-path-routing-dict": {
"type": "object",
"title": "Family-specific path routing rules",
"description": "Applied, only for the eponymous family, before global path routing rules.",
"patternProperties": {
".+": {
"$ref": "#/definitions/routing-rule-list"
}
}
},
"ssl": {
"description": "HTTPS certificate generation parameters",
"properties": {
"frontend-caucase-url-list": {
"title": "Frontend Caucase URL List",
"description": "List of URLs of caucase service of frontend groups to authenticate access from them.",
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"uniqueItems": true
},
"caucase-url": {
"title": "Caucase URL",
"description": "URL of caucase service to use. If not set, global setting will be used.",
"type": "string",
"format": "uri"
},
"csr": {
"title": "csr",
"description": "PEM-encoded certificate signature request to request server certificate with. If not provided, HTTPS will be disabled.",
"type": "string"
},
"max-crl-update-delay": {
"title": "Periodicity of CRL update (days)",
"description": "CRL will be updated from caucase at least this often.",
"type": "number",
"default": 1.0
}
},
"type": "object"
}
},
"type": "object"
}
}
}
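The request side of the schema above is easiest to grasp from Python, which is also how the test suite below drives it: the whole parameter dict is serialised as JSON under the '_' key ("json-in-xml" serialisation, see software.cfg.json). A minimal sketch with illustrative values only; none of the names below are mandated beyond what the schema defines:

import json

# Hypothetical request: two Zope families, one ZEO-served FileStorage
# mountpoint, and a global path routing rule for the balancer.
erp5_parameters = {
    'zope-partition-dict': {
        'web': {'family': 'public', 'instance-count': 2, 'thread-amount': 4},
        'activities': {'family': 'admin', 'timerserver-interval': 1},
    },
    'zodb': [
        {'name': 'main', 'mount-point': '/', 'type': 'zeo', 'server': {}},
    ],
    'balancer': {
        'path-routing-list': [
            ['/next', '/erp5/web_site_module/the_next_website'],
        ],
    },
}
request_parameters = {'_': json.dumps(erp5_parameters)}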
instance-erp5-output-schema.json 0000664 0000000 0000000 00000004663 14241130220 0032530 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5 {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by ERP5 instantiation",
"additionalProperties": false,
"properties": {
"hosts-dict": {
"description": "Hosts mapping, including auto-generated entries",
"patternProperties": {
".*": {
"description": "IP current entry resolves to",
"type": "string"
}
},
"type": "object"
},
"site-id": {
"description": "Chosen ERP5Site object identifier",
"type": "string"
},
"inituser-login": {
"description": "Initial user login",
"type": "string"
},
"inituser-password": {
"description": "Initial user password",
"type": "string"
},
"deadlock-debugger-password": {
"description": "Deadlock debugger password",
"type": "string"
},
"memcached-persistent-url": {
"description": "Persistent memcached access information",
"pattern": "^memcached://",
"type": "string"
},
"memcached-volatile-url": {
"description": "Volatile memcached access information",
"pattern": "^memcached://",
"type": "string"
},
"mariadb-database-list": {
"description": "Relational database access information",
"items": {
"pattern": "^mysql://",
"type": "string"
},
"uniqueItems": true,
"type": "array"
},
"mariadb-test-database-list": {
"description": "Relational database access information",
"items": {
"pattern": "^mysql://",
"type": "string"
},
"uniqueItems": true,
"type": "array"
},
"neo-masters": {
"$ref": "../neoppod/instance-neo-output-schema.json#/properties/masters"
},
"neo-admins": {
"$ref": "../neoppod/instance-neo-output-schema.json#/properties/admins"
},
"jupyter-url": {
"description": "Jupyter notebook web UI access information",
"pattern": "^https://",
"type": "string"
},
"caucase-http-url": {
"description": "Caucase url on HTTP. For HTTPS URL, uses https scheme, if port is explicitely specified in http URL, take that port and add 1 and use it as https port. If it is not specified.",
"pattern": "^http://",
"type": "string"
}
},
"patternProperties": {
"family-.*": {
"description": "Zope family access information",
"pattern": "^https://",
"type": "string"
}
},
"type": "object"
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/instance-kumofs-schema.json 0000664 0000000 0000000 00000001003 14241130220 0031663 0 ustar 00root root 0000000 0000000 {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"tcpv4-port": {
"allOf": [
{
"$ref": "./schemas-definitions.json#/tcpv4port"
},
{
"description": "Start allocating ports at this value, going upward"
}
]
},
"ram-storage-size": {
"description": "If 0 use disk storage, otherwise use ram and limit data size to this many megabytes",
"default": 0,
"type": "integer"
}
}
}
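For orientation, a hedged sketch of how this schema is reused by the 'kumofs' (persistent, default port 2000) and 'memcached' (volatile, default port 2010) sections of the main input schema above; the values are examples, not defaults:

# Illustrative only: per the schema, ram-storage-size 0 means disk storage,
# any other value keeps data in RAM, capped at that many megabytes.
kumofs_parameters = {
    'kumofs': {'tcpv4-port': 2000, 'ram-storage-size': 0},
    'memcached': {'tcpv4-port': 2010, 'ram-storage-size': 512},
}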
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/instance-mariadb-schema.json 0000664 0000000 0000000 00000012067 14241130220 0031772 0 ustar 00root root 0000000 0000000 {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"tcpv4-port": {
"allOf": [
{
"$ref": "./schemas-definitions.json#/tcpv4port"
},
{
"description": "Start allocating ports at this value, going downward"
}
]
},
"database-list": {
"description": "Databases to create and respective user credentials getting all privileges on it",
"default": [
{
"name": "erp5",
"user": "user",
"password": "insecure"
}
],
"minItems": 1,
"items": {
"required": [
"name",
"user",
"password"
],
"properties": {
"name": {
"description": "Database name",
"type": "string"
},
"user": {
"description": "User name",
"type": "string"
},
"password": {
"description": "User password",
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"catalog-backup": {
"description": "Backup control knobs",
"properties": {
"full-retention-days": {
"description": "How many days full backups must be retained, -1 meaning full backups are disabled and 0 meaning no expiration",
"default": 7,
"minimum": -1,
"type": "integer"
},
"incremental-retention-days": {
"description": "How many days incremental backups (binlogs) must be retained, -1 meaning incremental backups are disabled and 0 meaning no expiration, defaulting to full-retention-days' value",
"minimum": -1,
"type": "integer"
}
},
"type": "object"
},
"backup-periodicity": {
"description": "When to backup, specified in the same format as for systemd.time(7) calendar events (years & seconds not supported, DoW & DoM can not be combined).",
"default": "daily",
"type": "string"
},
"innodb-buffer-pool-size": {
"description": "See MariaDB documentation on innodb_buffer_pool_size",
"minimum": 0,
"type": "integer"
},
"innodb-buffer-pool-instances": {
"description": "See MariaDB documentation on innodb_buffer_pool_instances",
"minimum": 1,
"type": "integer"
},
"innodb-log-file-size": {
"description": "See MariaDB documentation on innodb_log_file_size",
"minimum": 0,
"type": "integer"
},
"innodb-log-buffer-size": {
"description": "See MariaDB documentation on innodb_log_buffer_size",
"minimum": 0,
"type": "integer"
},
"innodb-file-per-table": {
"description": "See MariaDB documentation on innodb_file_per_table",
"minimum": 0,
"maximum": 1,
"default": 0,
"type": "integer"
},
"long-query-time": {
"description": "Number of seconds above which long queries are logged",
"minimum": 0,
"default": 1,
"type": "number"
},
"max-connection-count": {
"description": "See MariaDB documentation on max_connections. If not provided, a value suitable for the number of request Zope processes is chosen.",
"minimum": 0,
"type": "integer"
},
"relaxed-writes": {
"description": "When enabled, sets innodb_flush_log_at_trx_commit = 0, innodb_flush_method = nosync, innodb_doublewrite = 0 and sync_frm = 0 - RTFM, those options are dangerous",
"default": false,
"type": "boolean"
},
"character-set-server": {
"description": "The server default character set",
"default": "utf8mb4",
"type": "string"
},
"collation-server": {
"description": "The server default collation",
"default": "utf8mb4_general_ci",
"type": "string"
},
"ssl": {
"description": "Enable and define SSL support for network connections",
"default": {},
"properties": {
"ca-crt": {
"description": "Certificate Authority's certificate, in PEM format",
"type": "string"
},
"crt": {
"description": "Server's certificate, in PEM format (mandatory to enable SSL support)",
"type": "string"
},
"key": {
"description": "Server's key, in PEM format (mandatory to enable SSL support)",
"type": "string"
},
"crl": {
"description": "Server's certificate revocation list, in PEM format",
"type": "string"
},
"cipher": {
"description": "Permissible cipher specifications, separated by colons",
"type": "string"
}
},
"type": "object"
},
"odbc-ini": {
"description": "Contents of odbc.ini file, see unixodbc document",
"default": "",
"type": "string"
},
"environment-variables": {
"description": "Extra environment variables for mysqld may be required to use third party ODBC libraries for CONNECT storage engine.",
"items": {
"type": "string"
},
"type": "array"
}
}
}
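A short sketch of a 'mariadb' parameter dict conforming to this schema; the credentials are placeholders, not values to reuse:

# Illustrative only: 'database-list' entries require name/user/password.
mariadb_parameters = {
    'tcpv4-port': 2099,
    'database-list': [
        {'name': 'erp5', 'user': 'erp5_user', 'password': 'change-me'},
    ],
    'backup-periodicity': 'daily',
    'catalog-backup': {'full-retention-days': 7, 'incremental-retention-days': 7},
    'long-query-time': 1,
}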
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/instance-smtp-schema.json 0000664 0000000 0000000 00000003370 14241130220 0031353 0 ustar 00root root 0000000 0000000 {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"tcpv4-port": {
"allOf": [
{
"$ref": "./schemas-definitions.json#/tcpv4port"
},
{
"description": "Start allocating ports at this value, going upward"
}
]
},
"postmaster": {
"description": "Mail address to send technical mails to. Non-empty value required for smptd relay service to be deployed. Values will be put in alias-dict as 'postmaster' key (alias-dict takes precedence)",
"default": "",
"type": "string"
},
"alias-dict": {
"description": "Mail alias support",
"default": {},
"patternProperties": {
".*": {
"description": "List of addresses alias expands to",
"type": "array"
}
},
"type": "object"
},
"relay": {
"description": "Forward outgoing mails to a specific relay. If enabled, relay must support TLS-encrypted SASL authentication.",
"dependencies": {
"host": [
"sasl-credential"
]
},
"properties": {
"host": {
"description": "Host name or address of relay, with optional port (ex: '[example.com]:submission'). Enclosing hostname with [] prevents MX lookup.",
"type": "string"
},
"sasl-credential": {
"description": "SASL credential, in the login:password form",
"type": "string"
}
},
"default": {},
"type": "object"
},
"divert": {
"description": "Intercept all mails and send them to given addresses instead of original recipient",
"type": "array",
"items": {
"type": "string"
},
"uniqueItems": true
}
}
}
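A sketch of the relay configuration this schema describes. Note the 'dependencies' clause above: providing 'host' requires 'sasl-credential', and enclosing the hostname in square brackets suppresses the MX lookup. All values are placeholders:

# Illustrative only.
smtp_parameters = {
    'postmaster': 'admin@example.com',  # also exposed as alias-dict['postmaster']
    'relay': {
        'host': '[relay.example.com]:submission',  # [] prevents MX lookup
        'sasl-credential': 'login:password',
    },
}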
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/instance-zeo-schema.json 0000664 0000000 0000000 00000001605 14241130220 0031164 0 ustar 00root root 0000000 0000000 {
"$schema": "http://json-schema.org/draft-04/schema#",
"additionalProperties": false,
"properties": {
"backup": {
"description": "'%(backup)s' is expanded to partition's ZODB backup path (typically 'srv/backup/zodb'), and %(name)s with the export id",
"default": "%(backup)s/%(name)s",
"type": "string"
},
"family": {
"description": "Opaque name used to regroup/separate mountpoints under different ZEO processes (must be valid as a file name and as a ConfigParser section name)",
"default": "default",
"pattern": "^[^<>:\"/\\|?*\\]\\[ ]*$",
"type": "string"
},
"path": {
"description": "FileStorage file path, '%(zodb)s' occurrences are replaced with the path to partition's srv/zodb directory, and %(name)s with the export id",
"default": "%(zodb)s/%(name)s.fs",
"type": "string"
}
},
"type": "object"
}
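The '%(...)s' placeholders above are plain Python string interpolation; a quick sketch of how the defaults expand for an export named 'main' (paths are illustrative):

defaults = {'path': '%(zodb)s/%(name)s.fs', 'backup': '%(backup)s/%(name)s'}
context = {'zodb': 'srv/zodb', 'backup': 'srv/backup/zodb', 'name': 'main'}
expanded = {key: value % context for key, value in defaults.items()}
# expanded == {'path': 'srv/zodb/main.fs', 'backup': 'srv/backup/zodb/main'}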
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/schemas-definitions.json 0000664 0000000 0000000 00000000217 14241130220 0031261 0 ustar 00root root 0000000 0000000 {
"$schema": "http://json-schema.org/draft-07/schema#",
"tcpv4port": {
"minimum": 0,
"maximum": 65535,
"type": "integer"
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/software-shared.cfg 0000664 0000000 0000000 00000000213 14241130220 0030205 0 ustar 00root root 0000000 0000000 [buildout]
extends = software.cfg
shared-parts = /opt/slapgrid/shared-parts
eggs-directory = /opt/slapgrid/shared-eggs
abi-tag-eggs = true
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/software.cfg 0000664 0000000 0000000 00000000065 14241130220 0026746 0 ustar 00root root 0000000 0000000 [buildout]
extends =
../../stack/erp5/buildout.cfg
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/software.cfg.json 0000664 0000000 0000000 00000000655 14241130220 0027723 0 ustar 00root root 0000000 0000000 {
"name": "ERP5",
"description": "ERP5, Open-Source ERP",
"serialisation": "json-in-xml",
"software-type": {
"default": {
"title": "Default",
"software-type": "default",
"description": "No automated database modification (ERP5Site is not automatically created).",
"request": "instance-erp5-input-schema.json",
"response": "instance-erp5-output-schema.json",
"index": 0
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/ 0000775 0000000 0000000 00000000000 14241130220 0025411 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/README.md 0000664 0000000 0000000 00000000040 14241130220 0026662 0 ustar 00root root 0000000 0000000 Tests for ERP5 software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/setup.py 0000664 0000000 0000000 00000004167 14241130220 0027133 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import absolute_import
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.erp5'
with open("README.md") as f:
long_description = f.read()
setup(name=name,
version=version,
description="Test for SlapOS' ERP5 software release",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'supervisor',
'slapos.libnetworkcache',
'erp5.util',
'psutil',
'requests',
'mysqlclient',
'backports.lzma',
'cryptography',
'pexpect',
'pyOpenSSL',
],
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/test/ 0000775 0000000 0000000 00000000000 14241130220 0026370 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/test/__init__.py 0000664 0000000 0000000 00000005114 14241130220 0030502 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import absolute_import
import json
import os
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
_setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'software.cfg')))
setup_module_executed = False
def setUpModule():
# slapos.testing.testcase's _setUpModule only needs to be executed once
global setup_module_executed
if not setup_module_executed:
_setUpModule()
setup_module_executed = True
class ERP5InstanceTestCase(SlapOSInstanceTestCase):
"""ERP5 base test case
"""
@classmethod
def getRootPartitionConnectionParameterDict(cls):
"""Return the output paramters from the root partition"""
return json.loads(
cls.computer_partition.getConnectionParameterDict()['_'])
@classmethod
def getComputerPartition(cls, partition_reference):
for computer_partition in cls.slap.computer.getComputerPartitionList():
if partition_reference == computer_partition.getInstanceParameter(
'instance_title'):
return computer_partition
@classmethod
def getComputerPartitionPath(cls, partition_reference):
partition_id = cls.getComputerPartition(partition_reference).getId()
return os.path.join(cls.slap._instance_root, partition_id)
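# Illustrative sketch (not part of the original suite): how the test modules
# below typically combine this base class with json-in-xml serialised
# parameters. Names are hypothetical.
#
#   class ExampleERP5Test(ERP5InstanceTestCase):
#     @classmethod
#     def getInstanceParameterDict(cls):
#       return {'_': json.dumps({'zope-partition-dict': {'1': {}}})}
#
#     def test_published_families(self):
#       # each zope family is published under a 'family-<name>' key
#       self.assertIn('family-default',
#                     self.getRootPartitionConnectionParameterDict())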
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/test/test_balancer.py 0000664 0000000 0000000 00000107762 14241130220 0031565 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import
import glob
import hashlib
import json
import logging
import os
import re
import shutil
import subprocess
import tempfile
import time
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler
import mock
import OpenSSL.SSL
import pexpect
import psutil
import requests
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from slapos.testing.testcase import ManagedResource
from slapos.testing.utils import (CrontabMixin, ManagedHTTPServer,
findFreeTCPPort)
from . import ERP5InstanceTestCase, setUpModule
from six.moves import range
setUpModule # pyflakes
class EchoHTTPServer(ManagedHTTPServer):
"""An HTTP Server responding with the request path and incoming headers,
encoded in json.
"""
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# type: () -> None
self.send_response(200)
self.send_header("Content-Type", "application/json")
response = json.dumps(
{
'Path': self.path,
'Incoming Headers': dict(self.headers.items()),
},
indent=2,
).encode('utf-8')
self.end_headers()
self.wfile.write(response)
log_message = logging.getLogger(__name__ + '.EchoHTTPServer').info
class EchoHTTP11Server(ManagedHTTPServer):
"""An HTTP/1.1 Server responding with the request path and incoming headers,
encoded in json.
"""
class RequestHandler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def do_GET(self):
# type: () -> None
self.send_response(200)
self.send_header("Content-Type", "application/json")
response = json.dumps(
{
'Path': self.path,
'Incoming Headers': dict(self.headers.items()),
},
indent=2,
).encode('utf-8')
self.send_header("Content-Length", str(len(response)))
self.end_headers()
self.wfile.write(response)
log_message = logging.getLogger(__name__ + '.EchoHTTP11Server').info
class CaucaseService(ManagedResource):
"""A caucase service.
"""
url = None # type: str
directory = None # type: str
_caucased_process = None # type: subprocess.Popen
def open(self):
# type: () -> None
# start a caucased and server certificate.
software_release_root_path = os.path.join(
self._cls.slap._software_root,
hashlib.md5(self._cls.getSoftwareURL().encode()).hexdigest(),
)
caucased_path = os.path.join(software_release_root_path, 'bin', 'caucased')
self.directory = tempfile.mkdtemp()
caucased_dir = os.path.join(self.directory, 'caucased')
os.mkdir(caucased_dir)
os.mkdir(os.path.join(caucased_dir, 'user'))
os.mkdir(os.path.join(caucased_dir, 'service'))
backend_caucased_netloc = '%s:%s' % (self._cls._ipv4_address, findFreeTCPPort(self._cls._ipv4_address))
self.url = 'http://' + backend_caucased_netloc
self._caucased_process = subprocess.Popen(
[
caucased_path,
'--db', os.path.join(caucased_dir, 'caucase.sqlite'),
'--server-key', os.path.join(caucased_dir, 'server.key.pem'),
'--netloc', backend_caucased_netloc,
'--service-auto-approve-count', '1',
],
# capture subprocess output not to pollute test's own stdout
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
for _ in range(30):
try:
if requests.get(self.url).status_code == 200:
break
except Exception:
pass
time.sleep(1)
else:
raise RuntimeError('caucased failed to start.')
def close(self):
# type: () -> None
self._caucased_process.terminate()
self._caucased_process.wait()
self._caucased_process.stdout.close()
shutil.rmtree(self.directory)
class BalancerTestCase(ERP5InstanceTestCase):
@classmethod
def getInstanceSoftwareType(cls):
return 'balancer'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
return {
'tcpv4-port': 8000,
'computer-memory-percent-threshold': 100,
# XXX what is this ? should probably not be needed here
'name': cls.__name__,
'monitor-passwd': 'secret',
'apachedex-configuration': [
'--logformat', '%h %l %u %t "%r" %>s %O "%{Referer}i" "%{User-Agent}i" %{ms}T',
'--erp5-base', '+erp5', '.*/VirtualHostRoot/erp5(/|\\?|$)',
'--base', '+other', '/',
'--skip-user-agent', 'Zabbix',
'--error-detail',
'--js-embed',
'--quiet',
],
'apachedex-promise-threshold': 100,
'haproxy-server-check-path': '/',
'zope-family-dict': {
'default': ['dummy_http_server'],
},
'dummy_http_server': [[cls.getManagedResource("backend_web_server", EchoHTTPServer).netloc, 1, False]],
'backend-path-dict': {
'default': '',
},
'ssl-authentication-dict': {},
'ssl': {
'caucase-url': cls.getManagedResource("caucase", CaucaseService).url,
},
'family-path-routing-dict': {},
'path-routing-list': [],
}
@classmethod
def getInstanceParameterDict(cls):
# type: () -> dict
return {'_': json.dumps(cls._getInstanceParameterDict())}
def setUp(self):
# type: () -> None
self.default_balancer_url = json.loads(
self.computer_partition.getConnectionParameterDict()['_'])['default']
class SlowHTTPServer(ManagedHTTPServer):
"""An HTTP Server which reply after 2 seconds.
"""
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# type: () -> None
self.send_response(200)
self.send_header("Content-Type", "text/plain")
time.sleep(2)
self.end_headers()
self.wfile.write(b"OK\n")
log_message = logging.getLogger(__name__ + '.SlowHTTPServer').info
class TestLog(BalancerTestCase, CrontabMixin):
"""Check logs emitted by balancer
"""
__partition_reference__ = 'l'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
parameter_dict = super(TestLog, cls)._getInstanceParameterDict()
# use a slow server instead
parameter_dict['dummy_http_server'] = [[cls.getManagedResource("slow_web_server", SlowHTTPServer).netloc, 1, False]]
return parameter_dict
def test_access_log_format(self):
# type: () -> None
requests.get(
six.moves.urllib.parse.urljoin(self.default_balancer_url, '/url_path'),
verify=False,
)
time.sleep(.5) # wait a bit more until access is logged
with open(os.path.join(self.computer_partition_root_path, 'var', 'log', 'apache-access.log')) as access_log_file:
access_line = access_log_file.read().splitlines()[-1]
self.assertIn('/url_path', access_line)
# last \d is the request time in milliseconds; since this SlowHTTPServer
# sleeps for 2 seconds, it should take between 2 and 3 seconds to process
# the request - but our test machines can be slow sometimes, so we tolerate
# up to 20 seconds.
match = re.match(
r'([(\d\.)]+) - - \[(.*?)\] "(.*?)" (\d+) (\d+) "(.*?)" "(.*?)" (\d+)',
access_line
)
self.assertTrue(match)
assert match
request_time = int(match.groups()[-1])
self.assertGreater(request_time, 2 * 1000)
self.assertLess(request_time, 20 * 1000)
def test_access_log_apachedex_report(self):
# type: () -> None
# make a request so that we have something in the logs
requests.get(self.default_balancer_url, verify=False)
# crontab for apachedex is executed
self._executeCrontabAtDate('generate-apachedex-report', '23:59')
# it creates a report for the day
apachedex_report, = glob.glob(
os.path.join(
self.computer_partition_root_path,
'srv',
'monitor',
'private',
'apachedex',
'ApacheDex-*.html',
))
with open(apachedex_report, 'r') as f:
report_text = f.read()
self.assertIn('APacheDEX', report_text)
# having this table means that apachedex could parse some lines.
self.assertIn('Hits per status code ', report_text)
def test_access_log_rotation(self):
# type: () -> None
# run logrotate a first time so that it create state files
self._executeCrontabAtDate('logrotate', '2000-01-01')
# make a request so that we have something in the logs
requests.get(self.default_balancer_url, verify=False).raise_for_status()
# slow query crontab depends on crontab for log rotation
# to be executed first.
self._executeCrontabAtDate('logrotate', '2050-01-01')
# this logrotate leaves the log for the day as non compressed
rotated_log_file = os.path.join(
self.computer_partition_root_path,
'srv',
'backup',
'logrotate',
'apache-access.log-20500101',
)
self.assertTrue(os.path.exists(rotated_log_file))
requests.get(self.default_balancer_url, verify=False).raise_for_status()
# on next day execution of logrotate, log files are compressed
self._executeCrontabAtDate('logrotate', '2050-01-02')
self.assertTrue(os.path.exists(rotated_log_file + '.xz'))
self.assertFalse(os.path.exists(rotated_log_file))
def test_error_log(self):
# type: () -> None
# stop backend server
backend_server = self.getManagedResource("slow_web_server", SlowHTTPServer)
self.addCleanup(backend_server.open)
backend_server.close()
# after a while, balancer should detect and log this event in error log
time.sleep(5)
self.assertEqual(
requests.get(self.default_balancer_url, verify=False).status_code,
requests.codes.service_unavailable)
with open(os.path.join(self.computer_partition_root_path, 'var', 'log', 'apache-error.log')) as error_log_file:
error_line = error_log_file.read().splitlines()[-1]
self.assertIn('proxy family_default has no server available!', error_line)
# this log also include a timestamp
self.assertRegexpMatches(error_line, r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')
class BalancerCookieHTTPServer(ManagedHTTPServer):
"""An HTTP Server which can set balancer cookie.
This server sets the cookie when the /set_cookie path is requested.
The reply body is the name used when registering this resource
using getManagedResource. This way we can assert which
backend replied.
"""
@property
def RequestHandler(self):
server = self
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# type: () -> None
self.send_response(200)
self.send_header("Content-Type", "text/plain")
if self.path == '/set_cookie':
# the balancer tells the backend what's the name of the balancer cookie with
# the X-Balancer-Current-Cookie header.
self.send_header('Set-Cookie', '%s=anything' % self.headers['X-Balancer-Current-Cookie'])
# The name of this cookie is SERVERID
assert self.headers['X-Balancer-Current-Cookie'] == 'SERVERID'
self.end_headers()
self.wfile.write(server._name.encode('utf-8'))
log_message = logging.getLogger(__name__ + '.BalancerCookieHTTPServer').info
return RequestHandler
class TestBalancer(BalancerTestCase):
"""Check balancing capabilities
"""
__partition_reference__ = 'b'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
parameter_dict = super(TestBalancer, cls)._getInstanceParameterDict()
# use two backend servers
parameter_dict['dummy_http_server'] = [
[cls.getManagedResource("backend_web_server1", BalancerCookieHTTPServer).netloc, 1, False],
[cls.getManagedResource("backend_web_server2", BalancerCookieHTTPServer).netloc, 1, False],
]
return parameter_dict
def test_balancer_round_robin(self):
# type: () -> None
# requests are by default balanced to both servers
self.assertEqual(
{requests.get(self.default_balancer_url, verify=False).text for _ in range(10)},
{'backend_web_server1', 'backend_web_server2'}
)
def test_balancer_server_down(self):
# type: () -> None
# if one backend is down, it is excluded from balancer
self.getManagedResource("backend_web_server2", BalancerCookieHTTPServer).close()
self.addCleanup(self.getManagedResource("backend_web_server2", BalancerCookieHTTPServer).open)
self.assertEqual(
{requests.get(self.default_balancer_url, verify=False).text for _ in range(10)},
{'backend_web_server1',}
)
def test_balancer_set_cookie(self):
# type: () -> None
# if backend provides a "SERVERID" cookie, balancer will overwrite it with the
# backend selected by balancing algorithm
self.assertIn(
requests.get(six.moves.urllib.parse.urljoin(self.default_balancer_url, '/set_cookie'), verify=False).cookies['SERVERID'],
('default-0', 'default-1'),
)
def test_balancer_respects_sticky_cookie(self):
# type: () -> None
# if the request is made with the sticky cookie, the client sticks to one backend
cookies = dict(SERVERID='default-1')
self.assertEqual(
{requests.get(self.default_balancer_url, verify=False, cookies=cookies).text for _ in range(10)},
{'backend_web_server2',}
)
# if that backend goes down, requests are balanced to another server
self.getManagedResource("backend_web_server2", BalancerCookieHTTPServer).close()
self.addCleanup(self.getManagedResource("backend_web_server2", BalancerCookieHTTPServer).open)
self.assertEqual(
requests.get(self.default_balancer_url, verify=False, cookies=cookies).text,
'backend_web_server1')
def test_balancer_stats_socket(self):
# type: () -> None
# real time statistics can be obtained by using the stats socket and there
# is a wrapper which makes this a bit easier.
socat_process = subprocess.Popen(
[self.computer_partition_root_path + '/bin/haproxy-socat-stats'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
try:
output, _ = socat_process.communicate(b"show stat\n")
except:
socat_process.kill()
socat_process.wait()
raise
self.assertEqual(socat_process.poll(), 0)
# output is a csv
self.assertIn(b'family_default,FRONTEND,', output)
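# Illustrative note (an assumption, not part of the original suite): the
# wrapper forwards its stdin to haproxy's stats socket, so the same query
# works from a shell inside the partition:
#
#   echo "show stat" | ./bin/haproxy-socat-stats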
class TestTestRunnerEntryPoints(BalancerTestCase):
"""Check balancer has some entries for test runner.
"""
__partition_reference__ = 't'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
parameter_dict = super(
TestTestRunnerEntryPoints,
cls,
)._getInstanceParameterDict()
parameter_dict['dummy_http_server-test-runner-address-list'] = [
[
cls.getManagedResource("backend_0", EchoHTTPServer).hostname,
cls.getManagedResource("backend_0", EchoHTTPServer).port,
],
[
cls.getManagedResource("backend_1", EchoHTTPServer).hostname,
cls.getManagedResource("backend_1", EchoHTTPServer).port,
],
[
cls.getManagedResource("backend_2", EchoHTTPServer).hostname,
cls.getManagedResource("backend_2", EchoHTTPServer).port,
],
]
return parameter_dict
def test_use_proper_backend(self):
# type: () -> None
# requests are directed to proper backend based on URL path
test_runner_url_list = self.getRootPartitionConnectionParameterDict(
)['default-test-runner-url-list']
url_0, url_1, url_2 = test_runner_url_list
self.assertEqual(
six.moves.urllib.parse.urlparse(url_0).netloc,
six.moves.urllib.parse.urlparse(url_1).netloc)
self.assertEqual(
six.moves.urllib.parse.urlparse(url_0).netloc,
six.moves.urllib.parse.urlparse(url_2).netloc)
path_0 = '/VirtualHostBase/https/{netloc}/VirtualHostRoot/_vh_unit_test_0/something'.format(
netloc=six.moves.urllib.parse.urlparse(url_0).netloc)
path_1 = '/VirtualHostBase/https/{netloc}/VirtualHostRoot/_vh_unit_test_1/something'.format(
netloc=six.moves.urllib.parse.urlparse(url_0).netloc)
path_2 = '/VirtualHostBase/https/{netloc}/VirtualHostRoot/_vh_unit_test_2/something'.format(
netloc=six.moves.urllib.parse.urlparse(url_0).netloc)
self.assertEqual(
{
requests.get(url_0 + 'something', verify=False).json()['Path']
for _ in range(10)
}, {path_0})
self.assertEqual(
{
requests.get(url_1 + 'something', verify=False).json()['Path']
for _ in range(10)
}, {path_1})
self.assertEqual(
{
requests.get(url_2 + 'something', verify=False).json()['Path']
for _ in range(10)
}, {path_2})
# If a test runner backend is down, others can be accessed.
self.getManagedResource("backend_0", EchoHTTPServer).close()
self.assertEqual(
{
requests.get(url_0 + 'something', verify=False).status_code
for _ in range(5)
}, {503})
self.assertEqual(
{
requests.get(url_1 + 'something', verify=False).json()['Path']
for _ in range(10)
}, {path_1})
class TestHTTP(BalancerTestCase):
"""Check HTTP protocol with a HTTP/1.1 backend
"""
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
parameter_dict = super(TestHTTP, cls)._getInstanceParameterDict()
# use a HTTP/1.1 server instead
parameter_dict['dummy_http_server'] = [[cls.getManagedResource("HTTP/1.1 Server", EchoHTTP11Server).netloc, 1, False]]
return parameter_dict
__partition_reference__ = 'h'
def test_http_version(self):
# type: () -> None
self.assertEqual(
subprocess.check_output([
'curl',
'--silent',
'--show-error',
'--output',
'/dev/null',
'--insecure',
'--write-out',
'%{http_version}',
self.default_balancer_url,
]),
b'2',
)
def test_keep_alive(self):
# type: () -> None
# when doing two requests, connection is established only once
with requests.Session() as session:
session.verify = False
# do a first request, which establish a first connection
session.get(self.default_balancer_url).raise_for_status()
# "break" new connection method and check we can make another request
with mock.patch(
"requests.packages.urllib3.connectionpool.HTTPSConnectionPool._new_conn",
) as new_conn:
session.get(self.default_balancer_url).raise_for_status()
new_conn.assert_not_called()
parsed_url = six.moves.urllib.parse.urlparse(self.default_balancer_url)
# check that we have an open file for the ip connection
self.assertTrue([
c for c in psutil.Process(os.getpid()).connections()
if c.status == 'ESTABLISHED' and c.raddr.ip == parsed_url.hostname
and c.raddr.port == parsed_url.port
])
class ContentTypeHTTPServer(ManagedHTTPServer):
"""An HTTP/1.1 Server which reply with content type from path.
For example when requested http://host/text/plain it will reply
with Content-Type: text/plain header.
The body is always "OK"
"""
class RequestHandler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def do_GET(self):
# type: () -> None
self.send_response(200)
if self.path == '/':
self.send_header("Content-Length", '0')
return self.end_headers()
content_type = self.path[1:]
body = b"OK"
self.send_header("Content-Type", content_type)
self.send_header("Content-Length", str(len(body)))
self.end_headers()
self.wfile.write(body)
log_message = logging.getLogger(__name__ + '.ContentTypeHTTPServer').info
class TestContentEncoding(BalancerTestCase):
"""Test how responses are gzip encoded or not depending on content type header.
"""
__partition_reference__ = 'ce'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
parameter_dict = super(TestContentEncoding, cls)._getInstanceParameterDict()
parameter_dict['dummy_http_server'] = [
[cls.getManagedResource("content_type_server", ContentTypeHTTPServer).netloc, 1, False],
]
return parameter_dict
def test_gzip_encoding(self):
# type: () -> None
for content_type in (
'text/cache-manifest',
'text/html',
'text/plain',
'text/css',
'application/hal+json',
'application/json',
'application/x-javascript',
'text/xml',
'application/xml',
'application/rss+xml',
'text/javascript',
'application/javascript',
'image/svg+xml',
'application/x-font-ttf',
'application/font-woff',
'application/font-woff2',
'application/x-font-opentype',
'application/wasm',):
resp = requests.get(six.moves.urllib.parse.urljoin(self.default_balancer_url, content_type), verify=False)
self.assertEqual(resp.headers['Content-Type'], content_type)
self.assertEqual(
resp.headers.get('Content-Encoding'),
'gzip',
'%s uses wrong encoding: %s' % (content_type, resp.headers.get('Content-Encoding')))
self.assertEqual(resp.text, 'OK')
def test_no_gzip_encoding(self):
# type: () -> None
resp = requests.get(six.moves.urllib.parse.urljoin(self.default_balancer_url, '/image/png'), verify=False)
self.assertNotIn('Content-Encoding', resp.headers)
self.assertEqual(resp.text, 'OK')
class CaucaseCertificate(ManagedResource):
"""A certificate signed by a caucase service.
"""
ca_crt_file = None # type: str
crl_file = None # type: str
csr_file = None # type: str
cert_file = None # type: str
key_file = None # type: str
def open(self):
# type: () -> None
self.tmpdir = tempfile.mkdtemp()
self.ca_crt_file = os.path.join(self.tmpdir, 'ca-crt.pem')
self.crl_file = os.path.join(self.tmpdir, 'ca-crl.pem')
self.csr_file = os.path.join(self.tmpdir, 'csr.pem')
self.cert_file = os.path.join(self.tmpdir, 'crt.pem')
self.key_file = os.path.join(self.tmpdir, 'key.pem')
def close(self):
# type: () -> None
shutil.rmtree(self.tmpdir)
@property
def _caucase_path(self):
# type: () -> str
"""path of caucase executable.
"""
software_release_root_path = os.path.join(
self._cls.slap._software_root,
hashlib.md5(self._cls.getSoftwareURL().encode()).hexdigest(),
)
return os.path.join(software_release_root_path, 'bin', 'caucase')
def request(self, common_name, caucase):
# type: (str, CaucaseService) -> None
"""Generate certificate and request signature to the caucase service.
This overwrite any previously requested certificate for this instance.
"""
cas_args = [
self._caucase_path,
'--ca-url', caucase.url,
'--ca-crt', self.ca_crt_file,
'--crl', self.crl_file,
]
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
with open(self.key_file, 'wb') as f:
f.write(
key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
))
csr = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name([
x509.NameAttribute(
NameOID.COMMON_NAME,
common_name,
),
])).sign(
key,
hashes.SHA256(),
default_backend(),
)
with open(self.csr_file, 'wb') as f:
f.write(csr.public_bytes(serialization.Encoding.PEM))
csr_id = subprocess.check_output(
cas_args + [
'--send-csr', self.csr_file,
],
).split()[0].decode()
assert csr_id
for _ in range(30):
if subprocess.call(
cas_args + [
'--get-crt', csr_id, self.cert_file,
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) == 0:
break
else:
time.sleep(1)
else:
raise RuntimeError('getting service certificate failed.')
with open(self.cert_file) as cert_file:
assert 'BEGIN CERTIFICATE' in cert_file.read()
def revoke(self, caucase):
# type: (CaucaseService) -> None
"""Revoke the client certificate on this caucase instance.
"""
subprocess.check_call([
self._caucase_path,
'--ca-url', caucase.url,
'--ca-crt', self.ca_crt_file,
'--crl', self.crl_file,
'--revoke-crt', self.cert_file, self.key_file,
])
class TestFrontendXForwardedFor(BalancerTestCase):
__partition_reference__ = 'xff'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
frontend_caucase = cls.getManagedResource('frontend_caucase', CaucaseService)
certificate = cls.getManagedResource('client_certificate', CaucaseCertificate)
certificate.request(u'shared frontend', frontend_caucase)
parameter_dict = super(TestFrontendXForwardedFor, cls)._getInstanceParameterDict()
# add another "-auth" backend, that will have ssl-authentication enabled
parameter_dict['zope-family-dict']['default-auth'] = ['dummy_http_server']
parameter_dict['backend-path-dict']['default-auth'] = '/'
parameter_dict['ssl-authentication-dict'] = {
'default': False,
'default-auth': True,
}
parameter_dict['ssl']['frontend-caucase-url-list'] = [frontend_caucase.url]
return parameter_dict
def test_x_forwarded_for_added_when_verified_connection(self):
# type: () -> None
client_certificate = self.getManagedResource('client_certificate', CaucaseCertificate)
for backend in ('default', 'default-auth'):
balancer_url = json.loads(self.computer_partition.getConnectionParameterDict()['_'])[backend]
result = requests.get(
balancer_url,
headers={'X-Forwarded-For': '1.2.3.4'},
cert=(client_certificate.cert_file, client_certificate.key_file),
verify=False,
).json()
self.assertEqual(result['Incoming Headers'].get('x-forwarded-for', '').split(', ')[0], '1.2.3.4')
def test_x_forwarded_for_stripped_when_not_verified_connection(self):
# type: () -> None
balancer_url = json.loads(self.computer_partition.getConnectionParameterDict()['_'])['default']
result = requests.get(
balancer_url,
headers={'X-Forwarded-For': '1.2.3.4'},
verify=False,
).json()
self.assertNotEqual(result['Incoming Headers'].get('x-forwarded-for', '').split(', ')[0], '1.2.3.4')
balancer_url = json.loads(self.computer_partition.getConnectionParameterDict()['_'])['default-auth']
with self.assertRaisesRegexp(Exception, "certificate required"):
requests.get(
balancer_url,
headers={'X-Forwarded-For': '1.2.3.4'},
verify=False,
)
class TestServerTLSProvidedCertificate(BalancerTestCase):
"""Check that certificate and key can be provided as instance parameters.
"""
__partition_reference__ = 's'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
server_caucase = cls.getManagedResource('server_caucase', CaucaseService)
server_certificate = cls.getManagedResource('server_certificate', CaucaseCertificate)
server_certificate.request(six.ensure_text(cls._ipv4_address), server_caucase)
parameter_dict = super(TestServerTLSProvidedCertificate, cls)._getInstanceParameterDict()
with open(server_certificate.cert_file) as f:
parameter_dict['ssl']['cert'] = f.read()
with open(server_certificate.key_file) as f:
parameter_dict['ssl']['key'] = f.read()
return parameter_dict
def test_certificate_validates_with_provided_ca(self):
# type: () -> None
server_certificate = self.getManagedResource("server_certificate", CaucaseCertificate)
requests.get(self.default_balancer_url, verify=server_certificate.ca_crt_file)
class TestClientTLS(BalancerTestCase):
__partition_reference__ = 'c'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
frontend_caucase1 = cls.getManagedResource('frontend_caucase1', CaucaseService)
certificate1 = cls.getManagedResource('client_certificate1', CaucaseCertificate)
certificate1.request(u'client_certificate1', frontend_caucase1)
frontend_caucase2 = cls.getManagedResource('frontend_caucase2', CaucaseService)
certificate2 = cls.getManagedResource('client_certificate2', CaucaseCertificate)
certificate2.request(u'client_certificate2', frontend_caucase2)
parameter_dict = super(TestClientTLS, cls)._getInstanceParameterDict()
parameter_dict['ssl-authentication-dict'] = {
'default': True,
}
parameter_dict['ssl']['frontend-caucase-url-list'] = [
frontend_caucase1.url,
frontend_caucase2.url,
]
return parameter_dict
def test_refresh_crl(self):
# type: () -> None
logger = self.logger
class DebugLogFile:
def write(self, msg):
logger.info("output from caucase_updater: %s", msg)
def flush(self):
pass
for client_certificate_name, caucase_name in (
('client_certificate1', 'frontend_caucase1'),
('client_certificate2', 'frontend_caucase2'),
):
client_certificate = self.getManagedResource(client_certificate_name,
CaucaseCertificate)
# when the client certificate can be authenticated, the backend receives the
# CN of the client certificate in the "remote-user" header
def _make_request():
# type: () -> dict
return requests.get(
self.default_balancer_url,
cert=(client_certificate.cert_file, client_certificate.key_file),
verify=False,
).json()
self.assertEqual(_make_request()['Incoming Headers'].get('remote-user'),
client_certificate_name)
# when certificate is revoked, updater service should update the CRL
# used by balancer from the caucase service used for client certificates
# (ie. the one used by frontend).
caucase = self.getManagedResource(caucase_name, CaucaseService)
client_certificate.revoke(caucase)
# until the CRL is updated, the client certificate is still accepted.
self.assertEqual(_make_request()['Incoming Headers'].get('remote-user'),
client_certificate_name)
# We have two services, in charge of updating CRL and CA certificates for
# each frontend CA
caucase_updater_list = glob.glob(
os.path.join(
self.computer_partition_root_path,
'etc',
'service',
'caucase-updater-*',
))
self.assertEqual(len(caucase_updater_list), 2)
# find the one corresponding to this caucase
for caucase_updater_candidate in caucase_updater_list:
with open(caucase_updater_candidate) as f:
if caucase.url in f.read():
caucase_updater = caucase_updater_candidate
break
else:
self.fail("Could not find caucase updater script for %s" % caucase.url)
# simulate running the updater service in the future, to confirm that it
# fetches the new CRL and that the balancer uses it.
process = pexpect.spawnu("faketime +1day %s" % caucase_updater)
process.logfile = DebugLogFile()
process.expect(u"Got new CRL.*Next wake-up at.*")
process.terminate()
process.wait()
with self.assertRaisesRegexp(Exception, 'certificate revoked'):
_make_request()
class TestPathBasedRouting(BalancerTestCase):
"""Check path-based routing rewrites URLs as expected.
"""
__partition_reference__ = 'pbr'
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
parameter_dict = super(
TestPathBasedRouting,
cls,
)._getInstanceParameterDict()
parameter_dict['zope-family-dict'][
'second'
] = parameter_dict['zope-family-dict'][
'default'
]
# Outermost slashes in routing rules mean nothing. They are internally
# stripped and rebuilt in order to correctly represent the request's URL.
parameter_dict['family-path-routing-dict'] = {
'default': [
['foo/bar', 'erp5/boo/far/faz'], # no outermost slashes
['/foo', '/erp5/somewhere'],
['/foo/shadowed', '/foo_shadowed'], # unreachable
['/next', '/erp5/web_site_module/another_next_website'],
],
}
parameter_dict['path-routing-list'] = [
['/next', '/erp5/web_site_module/the_next_website'],
['/next2', '/erp5/web_site_module/the_next2_website'],
['//', '//erp5/web_site_module/123//'], # extraneous slashes
]
return parameter_dict
def test_routing(self):
# type: () -> None
published_dict = json.loads(self.computer_partition.getConnectionParameterDict()['_'])
scheme = 'scheme'
netloc = 'example.com:8080'
prefix = '/VirtualHostBase/' + scheme + '//' + six.moves.urllib.parse.quote(
netloc,
safe='',
)
# For easier reading of test data, visually separating the virtual host
# base from the virtual host root
vhr = '/VirtualHostRoot'
def assertRoutingEqual(family, path, expected_path):
# type: (str, str, str) -> None
# sanity check: unlike the rules, this test is sensitive to outermost
# slashes, and paths must be absolute-ish for code simplicity.
assert path.startswith('/')
# Frontend is expected to provide URLs with the following path structure:
# /VirtualHostBase/<scheme>//<netloc>/VirtualHostRoot<path>
# where:
# - <scheme> is the user-input scheme
# - <netloc> is the user-input netloc
# - <path> is the user-input path
# Someday, frontends will instead propagate scheme and netloc via other
# means (likely: HTTP headers), in which case this test and the SR will
# need to be amended to reconstruct Virtual Host urls itself, and this
# test will need to be updated accordingly.
self.assertEqual(
requests.get(
six.moves.urllib.parse.urljoin(published_dict[family], prefix + vhr + path),
verify=False,
).json()['Path'],
expected_path,
)
# Trailing slash presence is preserved.
assertRoutingEqual('default', '/foo/bar', prefix + '/erp5/boo/far/faz' + vhr + '/_vh_foo/_vh_bar')
assertRoutingEqual('default', '/foo/bar/', prefix + '/erp5/boo/far/faz' + vhr + '/_vh_foo/_vh_bar/')
# Subpaths are preserved.
assertRoutingEqual('default', '/foo/bar/hey', prefix + '/erp5/boo/far/faz' + vhr + '/_vh_foo/_vh_bar/hey')
# Rule precedence: later less-specific rules are applied.
assertRoutingEqual('default', '/foo', prefix + '/erp5/somewhere' + vhr + '/_vh_foo')
assertRoutingEqual('default', '/foo/', prefix + '/erp5/somewhere' + vhr + '/_vh_foo/')
assertRoutingEqual('default', '/foo/baz', prefix + '/erp5/somewhere' + vhr + '/_vh_foo/baz')
# Rule precedence: later more-specific rules are meaningless.
assertRoutingEqual('default', '/foo/shadowed', prefix + '/erp5/somewhere' + vhr + '/_vh_foo/shadowed')
# Rule precedence: family rules applied before general rules.
assertRoutingEqual('default', '/next', prefix + '/erp5/web_site_module/another_next_website' + vhr + '/_vh_next')
# Fallback on general rules when no family-specific rule matches
# Note: the root is special in that there is always a trailing slash in the
# produced URL.
assertRoutingEqual('default', '/', prefix + '/erp5/web_site_module/123' + vhr + '/')
# A rule-less family falls back to the general rules.
assertRoutingEqual('second', '/foo/bar', prefix + '/erp5/web_site_module/123' + vhr + '/foo/bar')
# Rules match whole path elements, so the rule order does not matter for
# elements which share a common prefix.
assertRoutingEqual('second', '/next', prefix + '/erp5/web_site_module/the_next_website' + vhr + '/_vh_next')
assertRoutingEqual('second', '/next2', prefix + '/erp5/web_site_module/the_next2_website' + vhr + '/_vh_next2')
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/test/test_erp5.py 0000664 0000000 0000000 00000066213 14241130220 0030664 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import absolute_import
import contextlib
import glob
import json
import os
import shutil
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import unittest
import psutil
import requests
import six
import six.moves.urllib.parse
import six.moves.xmlrpc_client
import urllib3
from slapos.testing.utils import CrontabMixin
from . import ERP5InstanceTestCase, setUpModule
setUpModule # pyflakes
class TestPublishedURLIsReachableMixin(object):
"""Mixin that checks that default page of ERP5 is reachable.
"""
def _checkERP5IsReachable(self, base_url, site_id, verify):
# We access ERP5 trough a "virtual host", which should make
# ERP5 produce URLs using https://virtual-host-name:1234/virtual_host_root
# as base.
virtual_host_url = six.moves.urllib.parse.urljoin(
base_url,
'/VirtualHostBase/https/virtual-host-name:1234/{}/VirtualHostRoot/_vh_virtual_host_root/'
.format(site_id))
# Instantiation only creates the services; it does not wait for ERP5 to be
# initialized. When this test runs, the ERP5 instance is instantiated, but
# zope may still be busy creating the site: haproxy replies with
# 503 Service Unavailable while zope is not started yet, with 404 while the
# erp5 site is not created, and with 500 while mysql is not yet reachable,
# so we configure this requests session to retry.
# XXX we should probably add a promise instead
with requests.Session() as session:
session.mount(
base_url,
requests.adapters.HTTPAdapter(
max_retries=urllib3.util.retry.Retry(
total=20,
backoff_factor=.5,
status_forcelist=(404, 500, 503))))
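# Note: with total=20 and backoff_factor=.5, urllib3 sleeps roughly
# 0.5s, 1s, 2s, ... (exponential back-off) between retries, so the retry
# budget spans several minutes before the request is allowed to fail.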
r = session.get(virtual_host_url, verify=verify, allow_redirects=False)
self.assertEqual(r.status_code, requests.codes.found)
# accesses on / are redirected to the login form, with the virtual host preserved
self.assertEqual(r.headers.get('location'), 'https://virtual-host-name:1234/virtual_host_root/login_form')
# the login page can be rendered and contains the text "ERP5"
r = session.get(
six.moves.urllib.parse.urljoin(base_url, '{}/login_form'.format(site_id)),
verify=verify,
allow_redirects=False,
)
self.assertEqual(r.status_code, requests.codes.ok)
self.assertIn("ERP5", r.text)
def test_published_family_default_v6_is_reachable(self):
"""Tests the IPv6 URL published by the root partition is reachable.
"""
param_dict = self.getRootPartitionConnectionParameterDict()
self._checkERP5IsReachable(
param_dict['family-default-v6'],
param_dict['site-id'],
verify=False,
)
def test_published_family_default_v4_is_reachable(self):
"""Tests the IPv4 URL published by the root partition is reachable.
"""
param_dict = self.getRootPartitionConnectionParameterDict()
self._checkERP5IsReachable(
param_dict['family-default'],
param_dict['site-id'],
verify=False,
)
class TestDefaultParameters(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
"""Test ERP5 can be instantiated with no parameters
"""
__partition_reference__ = 'defp'
class TestMedusa(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
"""Test ERP5 Medusa server
"""
__partition_reference__ = 'medusa'
@classmethod
def getInstanceParameterDict(cls):
return {'_': json.dumps({'wsgi': False})}
class TestJupyter(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
"""Test ERP5 Jupyter notebook
"""
__partition_reference__ = 'jupyter'
@classmethod
def getInstanceParameterDict(cls):
return {'_': json.dumps({'jupyter': {'enable': True}})}
def test_jupyter_notebook_is_reachable(self):
param_dict = self.getRootPartitionConnectionParameterDict()
self.assertEqual(
'https://[%s]:8888/tree' % self._ipv6_address,
param_dict['jupyter-url']
)
result = requests.get(
param_dict['jupyter-url'], verify=False, allow_redirects=False)
self.assertEqual(
[requests.codes.found, True, '/login?next=%2Ftree'],
[result.status_code, result.is_redirect, result.headers['Location']]
)
class TestBalancerPorts(ERP5InstanceTestCase):
"""Instantiate with two zope families, this should create for each family:
- a balancer entry point with corresponding haproxy
- a balancer entry point for test runner
"""
__partition_reference__ = 'ap'
@classmethod
def getInstanceParameterDict(cls):
return {
'_':
json.dumps({
"zope-partition-dict": {
"family1": {
"instance-count": 3,
"family": "family1"
},
"family2": {
"instance-count": 5,
"family": "family2"
},
},
})
}
def checkValidHTTPSURL(self, url):
parsed = six.moves.urllib.parse.urlparse(url)
self.assertEqual(parsed.scheme, 'https')
self.assertTrue(parsed.hostname)
self.assertTrue(parsed.port)
def test_published_family_parameters(self):
# when we request two families, we have two published family-{family_name} URLs
param_dict = self.getRootPartitionConnectionParameterDict()
for family_name in ('family1', 'family2'):
self.checkValidHTTPSURL(
param_dict['family-{family_name}'.format(family_name=family_name)])
self.checkValidHTTPSURL(
param_dict['family-{family_name}-v6'.format(family_name=family_name)])
def test_published_test_runner_url(self):
# each family also has a published list of test runner URLs, 3 per family by default
param_dict = self.getRootPartitionConnectionParameterDict()
for family_name in ('family1', 'family2'):
family_test_runner_url_list = param_dict[
'{family_name}-test-runner-url-list'.format(family_name=family_name)]
self.assertEqual(3, len(family_test_runner_url_list))
for url in family_test_runner_url_list:
self.checkValidHTTPSURL(url)
def test_zope_listen(self):
# we requested 3 zopes in family1 and 5 zopes in family2, so we should have 8 zopes running.
with self.slap.instance_supervisor_rpc as supervisor:
all_process_info = supervisor.getAllProcessInfo()
self.assertEqual(
3 + 5,
len([p for p in all_process_info if p['name'].startswith('zope-')]))
def test_haproxy_listen(self):
# We have 2 families; haproxy should listen on a total of 3 ports per family:
# normal access on IPv4 and IPv6, and test runner access on IPv4 only.
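# In other words, per family: normal IPv4 + test runner IPv4 (4 AF_INET
# sockets for the 2 families) and normal IPv6 (2 AF_INET6 sockets), which
# is what the assertion below counts.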
with self.slap.instance_supervisor_rpc as supervisor:
all_process_info = supervisor.getAllProcessInfo()
process_info, = [p for p in all_process_info if p['name'].startswith('haproxy-')]
haproxy_master_process = psutil.Process(process_info['pid'])
haproxy_worker_process, = haproxy_master_process.children()
self.assertEqual(
sorted([socket.AF_INET] * 4 + [socket.AF_INET6] * 2),
sorted([
c.family
for c in haproxy_worker_process.connections()
if c.status == 'LISTEN'
]))
class TestSeleniumTestRunner(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
"""Test ERP5 can be instantiated with selenium server for test runner.
"""
__partition_reference__ = 'sel'
@classmethod
def getInstanceParameterDict(cls):
return {
'_':
json.dumps({
'test-runner': {
'selenium': {
"target": "selenium-server",
"server-url": "https://example.com",
"verify-server-certificate": False,
"desired-capabilities": {
"browserName": "firefox",
"version": "68.0.2esr",
}
}
}
})
}
def test_test_runner_configuration_json_file(self):
runUnitTest_script, = glob.glob(
self.computer_partition_root_path + "/../*/bin/runUnitTest.real")
config_file = None
with open(runUnitTest_script) as f:
for line in f:
if 'ERP5_TEST_RUNNER_CONFIGURATION' in line:
_, config_file = line.split('=')
assert config_file
with open(config_file.strip()) as f:
self.assertEqual(
f.read(),
json.dumps(json.loads(self.getInstanceParameterDict()['_'])['test-runner'], sort_keys=True))
class TestDisableTestRunner(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
"""Test ERP5 can be instantiated without test runner.
"""
__partition_reference__ = 'distr'
@classmethod
def getInstanceParameterDict(cls):
return {'_': json.dumps({'test-runner': {'enabled': False}})}
def test_no_runUnitTestScript(self):
"""No runUnitTest script should be generated in any partition.
"""
# self.computer_partition_root_path is the path of the root partition.
# We want to assert that no scripts exist in any partition.
bin_programs = list(map(os.path.basename,
glob.glob(self.computer_partition_root_path + "/../*/bin/*")))
self.assertTrue(bin_programs) # just to check the glob was correct.
self.assertNotIn('runUnitTest', bin_programs)
self.assertNotIn('runTestSuite', bin_programs)
def test_no_haproxy_testrunner_port(self):
# Haproxy only listens on two ports; there are no haproxy ports allocated for the test runner.
with self.slap.instance_supervisor_rpc as supervisor:
all_process_info = supervisor.getAllProcessInfo()
process_info, = [p for p in all_process_info if p['name'].startswith('haproxy')]
haproxy_master_process = psutil.Process(process_info['pid'])
haproxy_worker_process, = haproxy_master_process.children()
self.assertEqual(
sorted([socket.AF_INET, socket.AF_INET6]),
sorted(
c.family
for c in haproxy_worker_process.connections()
if c.status == 'LISTEN'
))
class TestZopeNodeParameterOverride(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
"""Test override zope node parameters
"""
__partition_reference__ = 'override'
@classmethod
def getInstanceParameterDict(cls):
# The following example includes the most commonly used options,
# but not necessarily in a meaningful way.
return {'_': json.dumps({
"zodb": [{
"type": "zeo",
"server": {},
"cache-size-bytes": "20MB",
"cache-size-bytes!": [
("bb-0", 1<<20),
("bb-.*", "500MB"),
],
"pool-timeout": "10m",
"storage-dict": {
"cache-size!": [
("a-.*", "50MB"),
],
},
}],
"zope-partition-dict": {
"a": {
"instance-count": 3,
},
"bb": {
"instance-count": 5,
"port-base": 2300,
},
},
})}
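# A note on the "!" suffix used above (e.g. "cache-size-bytes!"): it
# overrides the plain option per zope node, as a list of
# (node name pattern, value) pairs where, as the expectations in
# test_zope_conf below show, the first matching pattern wins: node "bb-0"
# gets 1<<20 bytes while the other "bb-*" nodes get "500MB".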
def test_zope_conf(self):
zeo_addr = json.loads(
self.getComputerPartition('zodb').getConnectionParameter('_')
)["storage-dict"]["root"]["server"]
def checkParameter(line, kw):
k, v = line.split()
self.assertFalse(k.endswith('!'), k)
try:
expected = kw.pop(k)
except KeyError:
if k == 'server':
return
self.assertIsNotNone(expected)
self.assertEqual(str(expected), v)
def checkConf(zodb, storage):
zodb["mount-point"] = "/"
zodb["pool-size"] = 4
zodb["pool-timeout"] = "10m"
storage["storage"] = "root"
storage["server"] = zeo_addr
with open('%s/etc/zope-%s.conf' % (partition, zope)) as f:
conf = list(map(str.strip, f.readlines()))
i = conf.index("<zodb_db root>") + 1
conf = iter(conf[i:conf.index("</zodb_db>", i)])
for line in conf:
if line == '<zeoclient>':
for line in conf:
if line == '</zeoclient>':
break
checkParameter(line, storage)
for k, v in six.iteritems(storage):
self.assertIsNone(v, k)
del storage
else:
checkParameter(line, zodb)
for k, v in six.iteritems(zodb):
self.assertIsNone(v, k)
partition = self.getComputerPartitionPath('zope-a')
for zope in range(3):
checkConf({
"cache-size-bytes": "20MB",
}, {
"cache-size": "50MB",
})
partition = self.getComputerPartitionPath('zope-bb')
for zope in range(5):
checkConf({
"cache-size-bytes": "500MB" if zope else 1<<20,
}, {
"cache-size": None,
})
class TestWatchActivities(ERP5InstanceTestCase):
"""Tests for bin/watch_activities scripts in zope partitions.
"""
__partition_reference__ = 'wa'
def test(self):
# "watch_activites" scripts use watch command. We'll fake a watch command
# that executes the actual command only once to check the output.
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'watch'), 'w') as f:
f.write("""#!/bin/sh
if [ "$1" != "-n" ] || [ "$2" != "5" ]
then
echo unexpected arguments: "$1" "$2"
exit 1
fi
shift
shift
exec bash -c "$@"
""")
os.fchmod(f.fileno(), 0o700)
try:
output = subprocess.check_output(
[
os.path.join(
self.getComputerPartitionPath('zope-1'),
'bin',
'watch_activities',
)
],
env=dict(os.environ,
PATH=os.pathsep.join([tmpdir, os.environ['PATH']])),
stderr=subprocess.STDOUT,
universal_newlines=True,
)
except subprocess.CalledProcessError as e:
self.fail(e.output)
self.assertIn(' dict ', output)
class ZopeTestMixin(CrontabMixin):
"""Mixin class for zope features.
"""
wsgi = NotImplemented # type: bool
__partition_reference__ = 'z'
@classmethod
def getInstanceParameterDict(cls):
return {
'_':
json.dumps({
"zope-partition-dict": {
"default": {
"longrequest-logger-interval": 1,
"longrequest-logger-timeout": 1,
},
},
"wsgi": cls.wsgi,
})
}
@classmethod
def _setUpClass(cls):
super(ZopeTestMixin, cls)._setUpClass()
param_dict = cls.getRootPartitionConnectionParameterDict()
# rebuild a URL with user and password
parsed = six.moves.urllib.parse.urlparse(param_dict['family-default'])
cls.zope_base_url = parsed._replace(
netloc='{}:{}@{}:{}'.format(
param_dict['inituser-login'],
param_dict['inituser-password'],
parsed.hostname,
parsed.port,
),
path=param_dict['site-id'] + '/',
).geturl()
cls.zope_deadlock_debugger_url = six.moves.urllib_parse.urljoin(
cls.zope_base_url,
'/manage_debug_threads?{deadlock-debugger-password}'.format(
**param_dict),
)
@contextlib.contextmanager
def getXMLRPCClient():
# don't verify certificate
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
erp5_xmlrpc_client = six.moves.xmlrpc_client.ServerProxy(
cls.zope_base_url,
context=ssl_context,
)
# BBB use as a context manager only on python3
if sys.version_info < (3, ):
yield erp5_xmlrpc_client
else:
with erp5_xmlrpc_client:
yield erp5_xmlrpc_client
with getXMLRPCClient() as erp5_xmlrpc_client:
# wait for ERP5 to be ready (TODO: this should probably be a promise)
for _ in range(120):
time.sleep(1)
try:
erp5_xmlrpc_client.getTitle()
except (six.moves.xmlrpc_client.ProtocolError,
six.moves.xmlrpc_client.Fault):
pass
else:
break
def addPythonScript(script_id, params, body):
with getXMLRPCClient() as erp5_xmlrpc_client:
custom = erp5_xmlrpc_client.portal_skins.custom
try:
custom.manage_addProduct.PythonScripts.manage_addPythonScript(
script_id)
except six.moves.xmlrpc_client.ProtocolError as e:
if e.errcode != 302:
raise
getattr(custom, script_id).ZPythonScriptHTML_editAction(
'',
'',
params,
body,
)
# a python script to verify activity processing
addPythonScript(
script_id='ERP5Site_verifyActivityProcessing',
params='mode',
body='''if 1:
import json
portal = context.getPortalObject()
if mode == "count":
return json.dumps(dict(count=len(portal.portal_activities.getMessageList())))
if mode == "activate":
for _ in range(10):
portal.portal_templates.activate(activity="SQLQueue").getTitle()
return "activated"
raise ValueError("Unknown mode: %s" % mode)
''',
)
cls.zope_verify_activity_processing_url = six.moves.urllib_parse.urljoin(
cls.zope_base_url,
'ERP5Site_verifyActivityProcessing',
)
# a python script logging to event log
addPythonScript(
script_id='ERP5Site_logMessage',
params='name',
body='''if 1:
from erp5.component.module.Log import log
return log("hello %s" % name)
''',
)
cls.zope_log_message_url = six.moves.urllib_parse.urljoin(
cls.zope_base_url,
'ERP5Site_logMessage',
)
# a python script issuing a long request
addPythonScript(
script_id='ERP5Site_executeLongRequest',
params='',
body='''if 1:
import time
for _ in range(5):
time.sleep(1)
return "done"
''',
)
cls.zope_long_request_url = six.moves.urllib_parse.urljoin(
cls.zope_base_url,
'ERP5Site_executeLongRequest',
)
def setUp(self):
super(ZopeTestMixin, self).setUp()
# run logrotate a first time so that it creates state files
self._executeCrontabAtDate('logrotate', '2000-01-01')
def tearDown(self):
super(ZopeTestMixin, self).tearDown()
# reset logrotate status
logrotate_status = os.path.join(
self.getComputerPartitionPath('zope-default'),
'srv',
'logrotate.status',
)
if os.path.exists(logrotate_status):
os.unlink(logrotate_status)
for logfile in glob.glob(
os.path.join(
self.getComputerPartitionPath('zope-default'),
'srv',
'backup',
'logrotate',
'*',
)):
os.unlink(logfile)
for logfile in glob.glob(
os.path.join(
self.getComputerPartitionPath('zope-default'),
'srv',
'monitor',
'private',
'documents',
'*',
)):
os.unlink(logfile)
def _getCrontabCommand(self, crontab_name):
# type: (str) -> str
"""Read a crontab and return the command that is executed.
Overloaded to use the crontab from the zope partition.
"""
with open(
os.path.join(
self.getComputerPartitionPath('zope-default'),
'etc',
'cron.d',
crontab_name,
)) as f:
crontab_spec, = f.readlines()
self.assertNotEqual(crontab_spec[0], '@', crontab_spec)
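# A (non-@) crontab entry has the form
# "<minute> <hour> <day> <month> <weekday> <command>", so splitting on the
# first 5 whitespace runs isolates the command part.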
return crontab_spec.split(None, 5)[-1]
def test_event_log_rotation(self):
requests.get(
self.zope_log_message_url,
params={
"name": "world"
},
verify=False,
).raise_for_status()
zope_event_log_path = os.path.join(
self.getComputerPartitionPath('zope-default'),
'var',
'log',
'zope-0-event.log',
)
with open(zope_event_log_path) as f:
self.assertIn('hello world', f.read())
self._executeCrontabAtDate('logrotate', '2050-01-01')
# this logrotate run leaves the log for the day uncompressed
rotated_log_file = os.path.join(
self.getComputerPartitionPath('zope-default'),
'srv',
'backup',
'logrotate',
'zope-0-event.log-20500101',
)
with open(rotated_log_file) as f:
self.assertIn('hello world', f.read())
requests.get(
self.zope_log_message_url,
params={
"name": "le monde"
},
verify=False,
).raise_for_status()
with open(zope_event_log_path) as f:
self.assertNotIn('hello world', f.read())
with open(zope_event_log_path) as f:
self.assertIn('hello le monde', f.read())
# on the next day's execution of logrotate, log files are compressed
self._executeCrontabAtDate('logrotate', '2050-01-02')
self.assertTrue(os.path.exists(rotated_log_file + '.xz'))
self.assertFalse(os.path.exists(rotated_log_file))
def test_access_log_rotation(self):
requests.get(
self.zope_base_url,
verify=False,
headers={
'User-Agent': 'before rotation'
},
).raise_for_status()
zope_access_log_path = os.path.join(
self.getComputerPartitionPath('zope-default'),
'var',
'log',
'zope-0-Z2.log',
)
with open(zope_access_log_path) as f:
self.assertIn('before rotation', f.read())
self._executeCrontabAtDate('logrotate', '2050-01-01')
# this logrotate run leaves the log for the day uncompressed
rotated_log_file = os.path.join(
self.getComputerPartitionPath('zope-default'),
'srv',
'backup',
'logrotate',
'zope-0-Z2.log-20500101',
)
with open(rotated_log_file) as f:
self.assertIn('before rotation', f.read())
requests.get(
self.zope_base_url,
verify=False,
headers={
'User-Agent': 'after rotation'
},
).raise_for_status()
with open(zope_access_log_path) as f:
self.assertNotIn('before rotation', f.read())
with open(zope_access_log_path) as f:
self.assertIn('after rotation', f.read())
# on the next day's execution of logrotate, log files are compressed
self._executeCrontabAtDate('logrotate', '2050-01-02')
self.assertTrue(os.path.exists(rotated_log_file + '.xz'))
self.assertFalse(os.path.exists(rotated_log_file))
def test_long_request_log_rotation(self):
requests.get(self.zope_long_request_url,
verify=False,
params={
'when': 'before rotation'
}).raise_for_status()
zope_long_request_log_path = os.path.join(
self.getComputerPartitionPath('zope-default'),
'var',
'log',
'longrequest_logger_zope-0.log',
)
with open(zope_long_request_log_path) as f:
self.assertIn('before rotation', f.read())
self._executeCrontabAtDate('logrotate', '2050-01-01')
# this logrotate run leaves the log for the day uncompressed
rotated_log_file = os.path.join(
self.getComputerPartitionPath('zope-default'),
'srv',
'backup',
'logrotate',
'longrequest_logger_zope-0.log-20500101',
)
with open(rotated_log_file) as f:
self.assertIn('before rotation', f.read())
requests.get(
self.zope_long_request_url,
verify=False,
params={
'when': 'after rotation'
},
).raise_for_status()
with open(zope_long_request_log_path) as f:
self.assertNotIn('before rotation', f.read())
with open(zope_long_request_log_path) as f:
self.assertIn('after rotation', f.read())
# on the next day's execution of logrotate, log files are compressed
self._executeCrontabAtDate('logrotate', '2050-01-02')
self.assertTrue(os.path.exists(rotated_log_file + '.xz'))
self.assertFalse(os.path.exists(rotated_log_file))
def test_basic_authentication_user_in_access_log(self):
param_dict = self.getRootPartitionConnectionParameterDict()
requests.get(self.zope_base_url,
verify=False,
auth=requests.auth.HTTPBasicAuth(
param_dict['inituser-login'],
param_dict['inituser-password'],
)).raise_for_status()
zope_access_log_path = os.path.join(
self.getComputerPartitionPath('zope-default'),
'var',
'log',
'zope-0-Z2.log',
)
with open(zope_access_log_path) as f:
self.assertIn(param_dict['inituser-login'], f.read())
def test_deadlock_debugger(self):
dump_response = requests.get(
self.zope_deadlock_debugger_url,
verify=False,
)
dump_response.raise_for_status()
self.assertIn('Thread ', dump_response.text)
def test_activity_processing(self):
def wait_for_activities(max_retries):
for retry in range(max_retries):
time.sleep(10)
resp = requests.get(
self.zope_verify_activity_processing_url,
params={
'mode': 'count',
'retry': retry,
},
verify=False,
)
if not resp.ok:
# XXX we start by flushing existing activities from site creation
# and the initial upgrader run. During this time it may happen that
# ERP5 replies with site errors; we tolerate these errors and only
# check the final state.
continue
count = resp.json()['count']
if not count:
break
else:
self.assertEqual(count, 0)
wait_for_activities(60)
requests.get(
self.zope_verify_activity_processing_url,
params={
'mode': 'activate'
},
verify=False,
).raise_for_status()
wait_for_activities(10)
class TestZopeMedusa(ZopeTestMixin, ERP5InstanceTestCase):
wsgi = False
class TestZopeWSGI(ZopeTestMixin, ERP5InstanceTestCase):
wsgi = True
@unittest.expectedFailure
def test_long_request_log_rotation(self):
super(TestZopeWSGI, self).test_long_request_log_rotation()
@unittest.expectedFailure
def test_basic_authentication_user_in_access_log(self):
super(TestZopeWSGI, self).test_basic_authentication_user_in_access_log()
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/test/test_mariadb.py 0000664 0000000 0000000 00000031412 14241130220 0031401 0 ustar 00root root 0000000 0000000 ##############################################################################
# coding: utf-8
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import absolute_import
import os
import json
import glob
import six.moves.urllib.parse
import socket
import sys
import time
import contextlib
import datetime
import subprocess
import gzip
from backports import lzma
import MySQLdb
from slapos.testing.utils import CrontabMixin
from slapos.testing.utils import getPromisePluginParameterDict
from . import ERP5InstanceTestCase
from . import setUpModule
setUpModule # pyflakes
class MariaDBTestCase(ERP5InstanceTestCase):
"""Base test case for mariadb tests.
"""
__partition_reference__ = 'm'
@classmethod
def getInstanceSoftwareType(cls):
return "mariadb"
@classmethod
def _getInstanceParameterDict(cls):
# type: () -> dict
return {
'tcpv4-port': 3306,
'max-connection-count': 5,
'long-query-time': 3,
'max-slowqueries-threshold': 1,
'slowest-query-threshold': 0.1,
# XXX what is this ? should probably not be needed here
'name': cls.__name__,
'monitor-passwd': 'secret',
# XXX should probably not be needed here
'computer-memory-percent-threshold': 100,
}
@classmethod
def getInstanceParameterDict(cls):
# type: () -> dict
return {'_': json.dumps(cls._getInstanceParameterDict())}
def getDatabaseConnection(self):
# type: () -> MySQLdb.connections.Connection
connection_parameter_dict = json.loads(
self.computer_partition.getConnectionParameterDict()['_'])
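# Entries in 'database-list' are URLs of the form
# mysql://user:password@host:port/database; urlparse splits them into the
# connection parameters passed to MySQLdb below.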
db_url = six.moves.urllib.parse.urlparse(connection_parameter_dict['database-list'][0])
self.assertEqual('mysql', db_url.scheme)
self.assertTrue(db_url.path.startswith('/'))
database_name = db_url.path[1:]
return MySQLdb.connect(
user=db_url.username,
passwd=db_url.password,
host=db_url.hostname,
port=db_url.port,
db=database_name,
use_unicode=True,
charset='utf8mb4'
)
class TestCrontabs(MariaDBTestCase, CrontabMixin):
def test_full_backup(self):
# type: () -> None
self._executeCrontabAtDate('mariadb-backup', '2050-01-01')
with gzip.open(
os.path.join(
self.computer_partition_root_path,
'srv',
'backup',
'mariadb-full',
'20500101000000.sql.gz',
),
'rt') as dump:
self.assertIn('CREATE TABLE', dump.read())
def test_logrotate_and_slow_query_digest(self):
# type: () -> None
# slow query digest needs to run after logrotate, since it operates on the rotated
# file, so this tests both logrotate and slow query digest.
# run logrotate a first time so that it creates state files
self._executeCrontabAtDate('logrotate', '2000-01-01')
# make two slow queries. We are using long-query-time=3, so the queries
# must take more than 3 seconds to be logged.
cnx = self.getDatabaseConnection()
with contextlib.closing(cnx):
cnx.query("SELECT SLEEP(3.1)")
cnx.store_result()
cnx.query("SELECT SLEEP(3.2)")
# slow query crontab depends on crontab for log rotation
# to be executed first.
self._executeCrontabAtDate('logrotate', '2050-01-01')
# this logrotate run leaves the log for the day uncompressed
rotated_log_file = os.path.join(
self.computer_partition_root_path,
'srv',
'backup',
'logrotate',
'mariadb_slowquery.log-20500101',
)
self.assertTrue(os.path.exists(rotated_log_file))
# then crontab to generate slow query report is executed
self._executeCrontabAtDate('generate-mariadb-slow-query-report', '2050-01-01')
# and it creates a report for the day
slow_query_report = os.path.join(
self.computer_partition_root_path,
'srv',
'monitor',
'private',
'slowquery_digest',
'slowquery_digest.txt-2050-01-01.xz',
)
with lzma.open(slow_query_report, 'rt') as f:
# this is the hash for our "select sleep(n)" slow query
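# (pt-query-digest fingerprints queries by stripping literals, so both
# SLEEP(3.1) and SLEEP(3.2) collapse into a single "select sleep(?)"
# query class with this stable ID.)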
self.assertIn("ID 0xF9A57DD5A41825CA", f.read())
# on the next day's execution of logrotate, log files are compressed
self._executeCrontabAtDate('logrotate', '2050-01-02')
self.assertTrue(os.path.exists(rotated_log_file + '.xz'))
self.assertFalse(os.path.exists(rotated_log_file))
# there's a promise checking that the threshold is not exceeded
# and it reports a problem since we set a threshold of 1 slow query
check_slow_query_promise_plugin = getPromisePluginParameterDict(
os.path.join(
self.computer_partition_root_path,
'etc',
'plugin',
'check-slow-query-pt-digest-result.py',
))
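# faketime pins the promise's notion of "today" to 2050-01-01, so it
# presumably reads the slowquery_digest.txt-2050-01-01.xz report
# generated above.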
with self.assertRaises(subprocess.CalledProcessError) as error_context:
subprocess.check_output('faketime 2050-01-01 %s' % check_slow_query_promise_plugin['command'], shell=True)
self.assertEqual(
error_context.exception.output,
b"""\
Threshold is lower than expected:
Expected total queries : 1.0 and current is: 2
Expected slowest query : 0.1 and current is: 3
""")
class TestMariaDB(MariaDBTestCase):
def test_utf8_collation(self):
# type: () -> None
cnx = self.getDatabaseConnection()
with contextlib.closing(cnx):
cnx.query(
"""
CREATE TABLE test_utf8_collation (
col1 CHAR(10)
)
""")
cnx.store_result()
cnx.query(
"""
insert into test_utf8_collation values ("à"), ("あ")
""")
cnx.store_result()
cnx.query(
"""
select * from test_utf8_collation where col1 = "a"
""")
self.assertEqual(((u'à',),), cnx.store_result().fetch_row(maxrows=2))
class TestMroonga(MariaDBTestCase):
def test_mroonga_plugin_loaded(self):
# type: () -> None
cnx = self.getDatabaseConnection()
with contextlib.closing(cnx):
cnx.query("show plugins")
plugins = cnx.store_result().fetch_row(maxrows=1000)
self.assertIn(
('Mroonga', 'ACTIVE', 'STORAGE ENGINE', 'ha_mroonga.so', 'GPL'),
plugins)
def test_mroonga_normalize_udf(self):
# type: () -> None
# example from https://mroonga.org/docs/reference/udf/mroonga_normalize.html#usage
cnx = self.getDatabaseConnection()
with contextlib.closing(cnx):
cnx.query(
"""
SELECT mroonga_normalize("ABCDã‚ãƒã†ã‡ã‘")
""")
# XXX this is returned as bytes by mroonga/mariadb (this might be a bug)
self.assertEqual(((u'abcdあぃうぇリットル'.encode('utf-8'),),),
cnx.store_result().fetch_row(maxrows=2))
if 0:
# this example fails with:
# OperationalError: (1123, "Can't initialize function 'mroonga_normalize'; mroonga_normalize(): nonexistent normalizer NormalizerMySQLUnicodeCIExceptKanaCI")
# same error on mroonga "official" docker images using mysql
# https://hub.docker.com/layers/groonga/mroonga/latest/images/sha256-e5a979801c95544ca3a1228d2c4d819820850e0162649553f2e94850e5e1c988?context=explore
# so it's probably OK to ignore
cnx.query(
"""
SELECT mroonga_normalize("aBcDã‚ãƒã‚¦ã‚§ã‘", "NormalizerMySQLUnicodeCIExceptKanaCIKanaWithVoicedSoundMark")
""")
self.assertEqual(((u'ABCDあぃうぇ㍑'.encode('utf-8'),),),
cnx.store_result().fetch_row(maxrows=2))
def test_mroonga_full_text_normalizer(self):
# type: () -> None
# example from https://mroonga.org//docs/tutorial/storage.html#how-to-specify-the-normalizer
cnx = self.getDatabaseConnection()
with contextlib.closing(cnx):
cnx.query("SET NAMES utf8")
cnx.store_result()
cnx.query(
"""
CREATE TABLE diaries (
day DATE PRIMARY KEY,
content VARCHAR(64) NOT NULL,
FULLTEXT INDEX (content) COMMENT 'normalizer "NormalizerAuto"'
) Engine=Mroonga DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
""")
cnx.store_result()
cnx.query(
"""INSERT INTO diaries VALUES ("2013-04-23", "ブラックコーヒーを飲んã 。")""")
cnx.store_result()
cnx.query(
"""
SELECT *
FROM diaries
WHERE MATCH (content) AGAINST ("+ãµã‚‰ã¤ã" IN BOOLEAN MODE)
""")
self.assertEqual((), cnx.store_result().fetch_row(maxrows=2))
cnx.query(
"""
SELECT *
FROM diaries
WHERE MATCH (content) AGAINST ("+ブラック" IN BOOLEAN MODE)
""")
self.assertEqual(
((datetime.date(2013, 4, 23), u'ブラックコーヒーを飲んだ。'),),
cnx.store_result().fetch_row(maxrows=2),
)
def test_mroonga_full_text_normalizer_TokenBigramSplitSymbolAlphaDigit(self):
# type: () -> None
# Similar to ERP5's testI18NSearch with erp5_full_text_mroonga_catalog
cnx = self.getDatabaseConnection()
with contextlib.closing(cnx):
cnx.query(
"""
CREATE TABLE `full_text` (
`uid` BIGINT UNSIGNED NOT NULL,
`SearchableText` MEDIUMTEXT,
PRIMARY KEY (`uid`),
FULLTEXT `SearchableText` (`SearchableText`) COMMENT 'parser "TokenBigramSplitSymbolAlphaDigit"'
) ENGINE=mroonga
""")
cnx.store_result()
cnx.query(
"""
INSERT INTO full_text VALUES
(1, "Gabriel Fauré Quick brown fox jumps over the lazy dog"),
(2, "æ¦è€…å°è·¯ 実篤 Slow white fox jumps over the diligent dog."),
(3, "( - + )")""")
cnx.store_result()
cnx.query(
"""
SELECT uid
FROM full_text
WHERE MATCH (`full_text`.`SearchableText`) AGAINST ('*D+ Faure' IN BOOLEAN MODE)
""")
self.assertEqual(((1,),), cnx.store_result().fetch_row(maxrows=2))
cnx.query(
"""
SELECT uid
FROM full_text
WHERE MATCH (`full_text`.`SearchableText`) AGAINST ('*D+ 武者' IN BOOLEAN MODE)
""")
self.assertEqual(((2,),), cnx.store_result().fetch_row(maxrows=2))
cnx.query(
"""
SELECT uid
FROM full_text
WHERE MATCH (`full_text`.`SearchableText`) AGAINST ('*D+ +quick +fox +dog' IN BOOLEAN MODE)
""")
self.assertEqual(((1,),), cnx.store_result().fetch_row(maxrows=2))
def test_mroonga_full_text_stem(self):
# type: () -> None
# example from https://mroonga.org//docs/tutorial/storage.html#how-to-specify-the-token-filters
cnx = self.getDatabaseConnection()
with contextlib.closing(cnx):
cnx.query("SELECT mroonga_command('register token_filters/stem')")
self.assertEqual(((b'true',),), cnx.store_result().fetch_row(maxrows=2))
cnx.query(
"""
CREATE TABLE memos (
id INT NOT NULL PRIMARY KEY,
content TEXT NOT NULL,
FULLTEXT INDEX (content) COMMENT 'normalizer "NormalizerAuto", token_filters "TokenFilterStem"'
) Engine=Mroonga DEFAULT CHARSET=utf8
""")
cnx.store_result()
cnx.query(
"""INSERT INTO memos VALUES (1, "I develop Groonga"), (2, "I'm developing Groonga"), (3, "I developed Groonga")"""
)
cnx.store_result()
cnx.query(
"""
SELECT *
FROM memos
WHERE MATCH (content) AGAINST ("+develops" IN BOOLEAN MODE)
""")
self.assertEqual([
(1, "I develop Groonga"),
(2, "I'm developing Groonga"),
(3, "I developed Groonga"),
], list(sorted(cnx.store_result().fetch_row(maxrows=4))))
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/test/test/test_wcfs.py 0000664 0000000 0000000 00000005105 14241130220 0030744 0 ustar 00root root 0000000 0000000 # Copyright (C) 2021 Nexedi SA and Contributors.
#
# This program is free software: you can Use, Study, Modify and Redistribute
# it under the terms of the GNU General Public License version 3, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# You can also Link and Combine this program with other software covered by
# the terms of any of the Free Software licenses or any of the Open Source
# Initiative approved licenses and Convey the resulting work. Corresponding
# source of such a combination shall include the source code for all other
# software used.
#
# This program is distributed WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
from __future__ import absolute_import
import json
import os.path
import unittest
from slapos.grid.utils import md5digest
from . import ERP5InstanceTestCase, setUpModule as _setUpModule
from .test_erp5 import TestPublishedURLIsReachableMixin
# skip tests when software release is built with wendelin.core 1.
def setUpModule():
_setUpModule()
cls = ERP5InstanceTestCase
if not os.path.exists(
os.path.join(
cls.slap.software_directory,
md5digest(cls.getSoftwareURL()),
'bin', 'wcfs')):
raise unittest.SkipTest("built with wendelin.core 1")
class TestWCFS(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
"""Test Wendelin Core File System
"""
__partition_reference__ = 'wcfs'
@classmethod
def getInstanceParameterDict(cls):
return {'_': json.dumps({'wcfs': {'enable': True}})}
def test_wcfs_accessible(self):
"""Verify that wcfs filesystem is basically accessible.
- we can read .wcfs/zurl
- its content is equal to published `serving-zurl`
"""
zurl = json.loads(
self.getComputerPartition('wcfs').getConnectionParameter('_')
)['serving-zurl']
mntpt = lookupMount(zurl)
zurl_ = readfile("%s/.wcfs/zurl" % mntpt)
self.assertEqual(zurl_, zurl)
# lookupMount returns the /proc/mounts entry for the wcfs mounted to serve zurl.
def lookupMount(zurl):
for line in readfile('/proc/mounts').splitlines():
# <zurl> <mntpt> fuse.wcfs ...
zurl_, mntpt, typ, _ = line.split(None, 3)
if typ != 'fuse.wcfs':
continue
if zurl_ == zurl:
return mntpt
raise KeyError("lookup mount %s: no /proc/mounts entry" % zurl)
# readfile returns content of file @path.
def readfile(path):
with open(path, 'r') as f:
return f.read()
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/upgrade_test/ 0000775 0000000 0000000 00000000000 14241130220 0027120 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/upgrade_test/README.md 0000664 0000000 0000000 00000000050 14241130220 0030372 0 ustar 00root root 0000000 0000000 Upgrade tests for ERP5 software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/upgrade_test/setup.py 0000664 0000000 0000000 00000003732 14241130220 0030637 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2020 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.upgrade_erp5'
with open("README.md") as f:
long_description = f.read()
setup(name=name,
version=version,
description="Upgrade test for SlapOS' ERP5 software release",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'supervisor',
'slapos.libnetworkcache',
'typing; python_version<"3"',
],
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5/upgrade_test/test.py 0000664 0000000 0000000 00000014047 14241130220 0030457 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2020 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import glob
import json
import os
import tempfile
import time
import requests
import urlparse
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
from slapos.testing.testcase import installSoftwareUrlList
from slapos.testing.testcase import SlapOSNodeCommandError
from slapos.grid.utils import md5digest
old_software_release_url = 'https://lab.nexedi.com/nexedi/slapos/raw/1.0.167.5/software/erp5/software.cfg'
new_software_release_url = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg'))
_, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
old_software_release_url,
software_id="upgrade_erp5",
skip_software_check=True,
)
def setUpModule():
installSoftwareUrlList(
SlapOSInstanceTestCase,
[old_software_release_url, new_software_release_url],
debug=SlapOSInstanceTestCase._debug,
)
class ERP5UpgradeTestCase(SlapOSInstanceTestCase):
# use short partition names for unix sockets
__partition_reference__ = 'u'
@classmethod
def setUpOldInstance(cls):
"""setUp hook executed while to old instance is running, before update
"""
pass
_current_software_url = old_software_release_url
@classmethod
def getSoftwareURL(cls):
return cls._current_software_url
@classmethod
def setUpClass(cls):
# request and instantiate with the old software URL
super(ERP5UpgradeTestCase, cls).setUpClass()
cls.setUpOldInstance()
# request instance on new software
cls._current_software_url = new_software_release_url
cls.logger.debug('requesting instance on new software')
cls.requestDefaultInstance()
# wait for slapos node instance
snapshot_name = "{}.{}.setUpClass new instance".format(
cls.__module__, cls.__name__)
try:
if cls._debug and cls.instance_max_retry:
try:
cls.slap.waitForInstance(max_retry=cls.instance_max_retry - 1)
except SlapOSNodeCommandError:
cls.slap.waitForInstance(debug=True)
else:
cls.slap.waitForInstance(max_retry=cls.instance_max_retry,
debug=cls._debug)
cls.logger.debug("instance on new software done")
except BaseException:
cls.logger.exception("Error during instance on new software")
cls._storeSystemSnapshot(snapshot_name)
cls._cleanup(snapshot_name)
cls.setUp = lambda self: self.fail('Setup Class failed.')
raise
else:
cls._storeSystemSnapshot(snapshot_name)
cls.computer_partition = cls.requestDefaultInstance()
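# The upgrade scenario is thus: instantiate with the old software release,
# run the setUpOldInstance hook against it, then re-request the same
# instance on the new release and wait for "slapos node instance" to
# converge before the tests run.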
class TestERP5Upgrade(ERP5UpgradeTestCase):
@classmethod
def setUpOldInstance(cls):
cls._default_instance_old_parameter_dict = json.loads(
cls.computer_partition.getConnectionParameterDict()['_'])
def test_published_url_is_same(self):
default_instance_new_parameter_dict = json.loads(
self.computer_partition.getConnectionParameterDict()['_'])
self.assertEqual(
default_instance_new_parameter_dict['family-default-v6'],
self._default_instance_old_parameter_dict['family-default-v6'],
)
def test_published_url_is_reachable(self):
default_instance_new_parameter_dict = json.loads(
self.computer_partition.getConnectionParameterDict()['_'])
# get certificate from caucase
with tempfile.NamedTemporaryFile(
prefix="ca.crt.pem",
mode="w",
delete=False,
) as ca_cert:
ca_cert.write(
requests.get(
urlparse.urljoin(
default_instance_new_parameter_dict['caucase-http-url'],
'/cas/crt/ca.crt.pem',
)).text)
ca_cert.flush()
# use a session to retry on failures, when ERP5 is not ready.
# (see also TestPublishedURLIsReachableMixin)
session = requests.Session()
session.mount(
default_instance_new_parameter_dict['family-default-v6'],
requests.adapters.HTTPAdapter(
max_retries=requests.packages.urllib3.util.retry.Retry(
total=60,
backoff_factor=.5,
status_forcelist=(404, 500, 503))))
session.get(
'{}/{}/login_form'.format(
default_instance_new_parameter_dict['family-default-v6'],
default_instance_new_parameter_dict['site-id'],
),
verify=False,
# TODO: we don't use caucase yet here.
# verify=ca_cert.name,
).raise_for_status()
def test_all_instances_use_new_software_release(self):
self.assertEqual(
{
os.path.basename(os.readlink(sr))
for sr in glob.glob(
os.path.join(
self.slap.instance_directory,
'*',
'software_release',
))
},
{md5digest(self.getSoftwareURL())},)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/ 0000775 0000000 0000000 00000000000 14241130220 0026200 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/buildout.hash.cfg 0000664 0000000 0000000 00000001567 14241130220 0031443 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposedly using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template]
filename = instance.cfg
md5sum = 5bd72da73a8b84f70f568686924d7d6c
[template-default]
filename = instance-default.cfg.jinja.in
md5sum = 0c9ea9fa7417885889a536ec2cf2093f
instance-default.cfg.jinja.in 0000664 0000000 0000000 00000022411 14241130220 0033527 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode {%- set slapparameter_dict = dict(default_parameter_dict, **slapparameter_dict) -%}
[buildout]
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
extends = ${monitor2-template:output}
parts =
testnode-service
certificate-authority
ca-shellinabox
ca-httpd-testnode
monitor-base
monitor-publish
testnode-frontend
resiliency-exclude-file
shellinabox-frontend-reload
promises
testnode-compatibility
[monitor-publish]
recipe = slapos.cookbook:publish
url = $${shellinabox-frontend:url}
frontend-url = $${testnode-frontend:connection-secure_access}
log-frontend-url = $${testnode-log-frontend:connection-secure_access}
[pwgen]
recipe = slapos.cookbook:generate.password
storage-path = $${buildout:directory}/.password
[testnode]
recipe = slapos.cookbook:erp5testnode
slapos-directory = $${directory:slapos}
working-directory = $${directory:testnode}
test-suite-directory = $${directory:test-suite}
shared-part-list = $${directory:shared}
proxy-host = {{ partition_ipv4 }}
proxy-port = 5000
log-directory = $${directory:log}
srv-directory = $${rootdirectory:srv}
software-directory = $${directory:software}
run-directory = $${directory:run}
test-node-title = {{ slapparameter_dict['test-node-title'] }}
node-quantity = {{ slapparameter_dict['node-quantity'] }}
ipv4-address = {{ partition_ipv4 }}
ipv6-address = {{ partition_ipv6 }}
test-suite-master-url = {{ slapparameter_dict['test-suite-master-url'] }}
instance-dict = {{ slapparameter_dict['instance-dict'] }}
{%- if isinstance(slapparameter_dict['software-path-list'], str) %}
software-path-list = {{ slapparameter_dict['software-path-list'] }}
{%- else %}
software-path-list = {{ json.dumps(slapparameter_dict['software-path-list']) }}
{%- endif %}
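# (software-path-list may be given either as a plain string, used verbatim,
# or as a list, which is re-serialised to JSON by the if/else above.)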
keep-log-days = {{ slapparameter_dict['keep-log-days'] }}
git-binary = ${git:location}/bin/git
slapos-binary = ${buildout:bin-directory}/slapos
testnode = ${buildout:bin-directory}/testnode
zip-binary = ${zip:location}/bin/zip
httpd-pid-file = $${basedirectory:run}/httpd.pid
httpd-lock-file = $${basedirectory:run}/httpd.lock
httpd-conf-file = $${rootdirectory:etc}/httpd.conf
httpd-wrapper = $${rootdirectory:bin}/httpd
httpd-port = 9080
httpd-software-access-port = 9081
httpd-ip = {{ partition_ipv6 }}
httpd-log-directory = $${basedirectory:log}
httpd-software-directory = $${directory:software}
httpd-cert-file = $${rootdirectory:etc}/httpd-public.crt
httpd-key-file = $${rootdirectory:etc}/httpd-private.key
frontend-url = $${testnode-frontend:connection-secure_access}
log-frontend-url = $${testnode-log-frontend:connection-secure_access}
configuration-file = $${rootdirectory:etc}/erp5testnode.cfg
log-file = $${basedirectory:log}/erp5testnode.log
wrapper = $${buildout:bin-directory}/erp5testnode-service
# Binaries
apache-binary = ${apache:location}/bin/httpd
apache-modules-dir = ${apache:location}/modules
apache-mime-file = ${apache:location}/conf/mime.types
apache-htpasswd = ${apache:location}/bin/htpasswd
[testnode-service]
# wrapper over erp5testnode which restarts the service when configuration changed
recipe = slapos.cookbook:wrapper
wrapper-path = $${basedirectory:services}/erp5testnode
command-line = $${testnode:wrapper}
hash-files =
$${testnode:wrapper}
$${testnode:configuration-file}
[shell-environment]
shell = ${bash:location}/bin/bash
[shellinabox]
recipe = slapos.recipe.template
# We cannot use slapos.cookbook:wrapper here because this recipe escapes too much
socket = $${directory:run}/siab.sock
output = $${basedirectory:services}/shellinaboxd
inline =
#!/bin/sh
exec ${shellinabox:location}/bin/shellinaboxd \
--unixdomain-only=$${:socket}:$(id -u):$(id -g):0600 \
--service "/:$(id -u):$(id -g):HOME:$${shell-environment:shell} -l"
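# The next section renders a Caddyfile: Caddy terminates TLS with a
# self-signed certificate on the partition's IPv6 address and proxies to
# shellinaboxd through its unix socket, protecting the shell with HTTP
# basic auth (password generated by [pwgen]).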
[shellinabox-frontend-config]
recipe = slapos.recipe.template
output = $${directory:etc}/$${:_buildout_section_name_}
inline =
https://$${:hostname}:$${:port} {
bind $${:ipv6}
tls self_signed
gzip
log stdout
errors stderr
proxy / unix:$${shellinabox:socket}
basicauth $${:username} $${:passwd} {
realm "Test Node $${testnode:test-node-title}"
/
}
}
ipv6 = {{ partition_ipv6 }}
hostname = [$${:ipv6}]
port = 8080
username = testnode
passwd = $${pwgen:passwd}
cert-file = $${directory:shellinabox}/public.crt
key-file = $${directory:shellinabox}/private.key
backend-url = https://$${:username}:$${:passwd}@$${:hostname}:$${:port}
[shellinabox-frontend]
recipe = slapos.cookbook:wrapper
wrapper-path = $${rootdirectory:bin}/$${:_buildout_section_name_}
command-line =
${caddy:output} -conf $${shellinabox-frontend-config:output} -pidfile $${:pidfile}
url = $${shellinabox-frontend-config:backend-url}
hostname = $${shellinabox-frontend-config:ipv6}
port = $${shellinabox-frontend-config:port}
pidfile = $${basedirectory:run}/$${:_buildout_section_name_}.pid
[shellinabox-frontend-reload]
recipe = slapos.cookbook:wrapper
wrapper-path = $${basedirectory:services}/$${:_buildout_section_name_}
command-line =
${bash:location}/bin/bash -c
"kill -s USR1 $$(${coreutils:location}/bin/cat $${shellinabox-frontend:pidfile}) \
&& ${coreutils:location}/bin/sleep infinity"
hash-files =
$${shellinabox-frontend-config:output}
$${shellinabox-frontend:wrapper-path}
[certificate-authority]
recipe = slapos.cookbook:certificate_authority
openssl-binary = ${openssl:location}/bin/openssl
ca-dir = $${directory:ca-dir}
requests-directory = $${cadirectory:requests}
wrapper = $${basedirectory:services}/ca
ca-private = $${cadirectory:private}
ca-certs = $${cadirectory:certs}
ca-newcerts = $${cadirectory:newcerts}
ca-crl = $${cadirectory:crl}
[cadirectory]
recipe = slapos.cookbook:mkdirectory
requests = $${directory:ca-dir}/requests/
private = $${directory:ca-dir}/private/
certs = $${directory:ca-dir}/certs/
newcerts = $${directory:ca-dir}/newcerts/
crl = $${directory:ca-dir}/crl/
[ca-shellinabox]
<= certificate-authority
recipe = slapos.cookbook:certificate_authority.request
executable = $${shellinabox-frontend:wrapper-path}
wrapper = $${basedirectory:services}/shellinabox-frontend
key-file = $${shellinabox-frontend-config:key-file}
cert-file = $${shellinabox-frontend-config:cert-file}
[ca-httpd-testnode]
<= certificate-authority
recipe = slapos.cookbook:certificate_authority.request
executable = $${testnode:httpd-wrapper}
wrapper = $${basedirectory:services}/httpd
key-file = $${testnode:httpd-key-file}
cert-file = $${testnode:httpd-cert-file}
[rootdirectory]
recipe = slapos.cookbook:mkdirectory
etc = $${buildout:directory}/etc
var = $${buildout:directory}/var
srv = $${buildout:directory}/srv
bin = $${buildout:directory}/bin
tmp = $${buildout:directory}/tmp
[basedirectory]
recipe = slapos.cookbook:mkdirectory
log = $${rootdirectory:var}/log
services = $${rootdirectory:etc}/service
run = $${rootdirectory:var}/run
[directory]
recipe = slapos.cookbook:mkdirectory
slapos = $${rootdirectory:srv}/slapos
testnode = $${buildout:directory}/t
shared = $${rootdirectory:srv}/shared
test-suite = $${rootdirectory:srv}/test_suite
log = $${basedirectory:log}/testnode
run = $${basedirectory:run}/testnode
software = $${rootdirectory:srv}/software
shellinabox = $${rootdirectory:srv}/shellinabox
ca-dir = $${rootdirectory:srv}/ca
[testnode-compatibility]
# Remove old ~/srv/testnode
recipe = slapos.recipe.build
update =
import os
from zc.buildout.rmtree import rmtree
old_testnode_path = self.buildout['rootdirectory']['srv'] + '/testnode'
if os.path.exists(old_testnode_path):
rmtree(old_testnode_path)
[resiliency-exclude-file]
# Generate rdiff exclude file in case of resiliency
recipe = collective.recipe.template
input = inline: **
output = $${directory:srv}/exporter.exclude
[request-shared-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
# XXX We have hardcoded SR URL here.
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
slave = true
config-https-only = true
return = domain secure_access
[testnode-frontend]
<= request-shared-frontend
name = Test Node Frontend $${testnode:test-node-title}
config-url = https://[$${testnode:httpd-ip}]:$${testnode:httpd-software-access-port}
[testnode-log-frontend]
<= request-shared-frontend
name = Test Node Logs Frontend $${testnode:test-node-title}
config-url = https://[$${testnode:httpd-ip}]:$${testnode:httpd-port}
[promises]
recipe =
instance-promises =
$${shellinabox-frontend-listen-promise:name}
# $${shellinabox-frontend-available-promise:name}
$${testnode-log-frontend-promise:name}
[shellinabox-frontend-listen-promise]
<= monitor-promise-base
promise = check_socket_listening
name = $${:_buildout_section_name_}.py
config-host = $${shellinabox-frontend:hostname}
config-port = $${shellinabox-frontend:port}
## This promise fails in a test suite
## due to port conflicts with the testnode of the test suite
# [shellinabox-frontend-available-promise]
# <= monitor-promise-base
# module = check_url_available
# name = $${:_buildout_section_name_}.py
# config-url = https://$${shellinabox-frontend-config:hostname}:$${shellinabox-frontend-config:port}
# config-username = $${shellinabox-frontend-config:username}
# config-password = $${shellinabox-frontend-config:passwd}
[testnode-log-frontend-promise]
<= monitor-promise-base
promise = check_url_available
name = $${:_buildout_section_name_}.py
config-url = $${testnode-log-frontend:connection-secure_access}
instance-erp5testnode-input-schema.json 0000664 0000000 0000000 00000001515 14241130220 0035634 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode {
"type": "object",
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Input Parameters",
"properties": {
"test-node-title": {
"title": "Test Node Title",
"description": "Name for the Test Node which will be used at ERP5.",
"type": "string"
},
"node-quantity": {
"title": "Quantity of Parallel runs",
"description": "QUantity of Parallel tests will be executed on this Node.",
"type": "integer"
},
"test-suite-master-url": {
"title": "Task Distribution URL",
"description": "Url for the task distributor master on portal_task_distribution",
"type": "string"
},
"keep-log-days": {
"title": "Retention of test logs",
"description": "Number of days to keep logs and snapshots",
"type": "number",
"default": 15
}
}
}
instance-output-schema.json 0000664 0000000 0000000 00000000574 14241130220 0033422 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by Apache Frontend instanciation",
"properties": {
"password": {
"description": "Password to access shellinabox.",
"type": "string"
},
"url": {
"description": "IPv6 URL to access the shell in a box.",
"type": "string"
}
},
"type": "object"
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/instance.cfg 0000664 0000000 0000000 00000002262 14241130220 0030467 0 ustar 00root root 0000000 0000000 [buildout]
parts =
switch_softwaretype
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
[switch_softwaretype]
recipe = slapos.cookbook:switch-softwaretype
RootSoftwareInstance = $${:default}
default = instance-default:output
[instance-default]
recipe = slapos.recipe.template:jinja2
url = ${template-default:output}
output = $${buildout:directory}/instance-default.cfg
context =
import json json
jsonkey default_parameter_dict :default-parameters
key slapparameter_dict slap-configuration:configuration
key partition_ipv6 slap-configuration:ipv6-random
key partition_ipv4 slap-configuration:ipv4-random
default-parameters =
{
"node-quantity": 1,
"test-suite-master-url": "",
"instance-dict": "",
"software-path-list": ["https://lab.nexedi.com/nexedi/slapos/raw/1.0.210/software/seleniumrunner/software.cfg"],
"keep-log-days": 15
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/software.cfg 0000664 0000000 0000000 00000002345 14241130220 0030517 0 ustar 00root root 0000000 0000000 [buildout]
extends =
buildout.hash.cfg
../../stack/slapos.cfg
../../component/git/buildout.cfg
../../component/lxml-python/buildout.cfg
../../component/zip/buildout.cfg
../../component/bash/buildout.cfg
../../component/caddy/buildout.cfg
../../component/coreutils/buildout.cfg
../../component/shellinabox/buildout.cfg
../../component/pwgen/buildout.cfg
../../component/apache/buildout.cfg
../../stack/monitor/buildout.cfg
parts =
slapos-command
slapos-cookbook
template
eggs
zip
git
apache
[python]
part = python3
[eggs]
recipe = zc.recipe.egg
eggs =
erp5.util[testnode,benchmark]
${lxml-python:egg}
[template]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/template.cfg
[template-default]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/template-default.cfg.jinja
[versions]
dnspython = 1.15.0
PyXML = 0.8.5
WebOb = 1.8.5
WebTest = 2.0.33
soupsieve = 1.8
waitress = 1.4.4
z3c.etestbrowser = 3.0.1
zope.testbrowser = 5.3.2
WSGIProxy2 = 0.4.6
beautifulsoup4 = 4.7.1
zope.cachedescriptors = 4.3.1
zope.event = 4.4
zope.schema = 4.9.3
zope.deferredimport = 4.3.1
zope.proxy = 4.3.5
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/software.cfg.json 0000664 0000000 0000000 00000000617 14241130220 0031467 0 ustar 00root root 0000000 0000000 {
"name": "ERP5 Test Node",
"description": "ERP5 Test Node Runner",
"serialisation": "xml",
"software-type": {
"default": {
"title": "Default",
"software-type": "default",
"description": "Default setup for ERP5TestNode Instance.",
"request": "instance-erp5testnode-input-schema.json",
"response": "instance-output-schema.json",
"index": 0
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/test/ 0000775 0000000 0000000 00000000000 14241130220 0027157 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/test/README.md 0000664 0000000 0000000 00000000050 14241130220 0030431 0 ustar 00root root 0000000 0000000 Tests for erp5testnode software release
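
These tests use the `slapos.testing` framework. A minimal local run could
look like this (a sketch; the IPv4/IPv6 values are placeholders for
addresses available on the test machine):

    SLAPOS_TEST_IPV4=10.0.0.1 SLAPOS_TEST_IPV6=::1 python -m unittest test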
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/test/setup.py 0000664 0000000 0000000 00000003664 14241130220 0030702 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.erp5testnode'
with open("README.md") as f:
long_description = f.read()
setup(
name=name,
version=version,
description="Test for SlapOS' erp5testnode",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'requests',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/test/test.py 0000664 0000000 0000000 00000003666 14241130220 0030523 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class TestnodeTest(SlapOSInstanceTestCase):
@classmethod
def getInstanceParameterDict(cls):
return {"test-node-title": "hello"}
def test(self):
    connection_parameters = self.computer_partition.getConnectionParameterDict()
    self.assertIn('url', connection_parameters)
    self.assertIn('frontend-url', connection_parameters)
    self.assertIn('log-frontend-url', connection_parameters)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/ 0000775 0000000 0000000 00000000000 14241130220 0030231 5 ustar 00root root 0000000 0000000 buildout.hash.cfg 0000664 0000000 0000000 00000001441 14241130220 0033404 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite # THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template-erp5testnode]
filename = instance.cfg.in
md5sum = 681f4159e4ac2542d0fbc7a894e43d79
deploy-test/ 0000775 0000000 0000000 00000000000 14241130220 0032423 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite README.rst 0000664 0000000 0000000 00000000613 14241130220 0034112 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test deploy-test
===========
Introduction
------------
This is a software release to run tests on VMs without direct access (e.g.
ssh) to the VM.
It is intended to be used as a backend for ERP5TestNode.
Characteristics and limitations:
* partitions share the same user (as they access files directly)
* this SR will be installed and instantiated only from local file system
(like git clone)
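
Example
-------

A minimal request sketch (parameter names come from
``instance-input-schema.json``; the software path, script URL, image URL and
md5sum below are illustrative placeholders)::

  slapos request deploy-test-1 file:///path/to/deploy-test/software.cfg \
    --parameters image-to-test-url=https://example.invalid/image.qcow2 \
                 image-to-test-md5sum=0123456789abcdef0123456789abcdef \
                 test-type=script-from-url \
                 script-to-test-url=https://example.invalid/deploy.bash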
buildout.hash.cfg 0000664 0000000 0000000 00000001620 14241130220 0035654 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test # THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[deploy-script-controller-script]
filename = deploy-script-controller
md5sum = 8357771b70efd0740561b1cb46f6955e
[template-deploy-test]
filename = instance.cfg.in
md5sum = c5db797980951b764b69aaa4b60a0380
deploy-script-controller 0000664 0000000 0000000 00000005367 14241130220 0037340 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test #!/bin/bash
# Script for controlling deploy script
#
# Assumption: a deployment script returning with code 0
# ran correctly and everything is done
#
# This script is run inside of the KVM, by passing its
# URL with bootstrap-script-url
# The script is configured by passing the configuration
# in text form with data-to-vm
#
# Format of data-to-vm is shell script:
# URL=<url>\nWAITTIME=<seconds>\nTRIES=<amount>
#
# Expected values in configuration:
# * URL - the url of the script to download and test
# * WAITTIME - waiting time, before next try
# * TRIES - amount of tries
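#
# Example data-to-vm payload (illustrative values; the defaults injected
# by software.cfg are WAITTIME=360 and TRIES=80):
#   URL=https://example.invalid/deploy.bash
#   WAITTIME=360
#   TRIES=80
#   TEST_YML_PATH=/not/required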
# Possible TODOs:
# * post results on each try
# * use function + trap to assure posting on exit
LOG_FILE=/var/log/test-script-deployment.log
wget -O /tmp/test-script.cfg.$$ -q http://10.0.2.100/data
source /tmp/test-script.cfg.$$
if [ -z "$LOG_FILE" ] ; then
echo "Output log file is missing"
exit 1
fi
if [ -z "$URL" ] ; then
echo "URL is missing" >> $LOG_FILE 2>&1
exit 1
fi
if [ -z "$WAITTIME" ] ; then
echo "WAITTIME missing" >> $LOG_FILE 2>&1
exit 1
fi
if [ -z "$TRIES" ] ; then
echo "TRIES missing" >> $LOG_FILE 2>&1
exit 1
fi
DEPLOYMENT_SCRIPT=/tmp/test-script-deployment.bash.$$
wget -O $DEPLOYMENT_SCRIPT -q $URL
if [[ ! -s "$DEPLOYMENT_SCRIPT" ]] ; then
echo "exit 1" > $DEPLOYMENT_SCRIPT
fi
function add_log ()
{
LOG_FILE=$1
for f in /opt/slapos/log/slapos-node-{software,instance}.log ; do
echo "Tail of '$f':" >> $LOG_FILE
tail -n 500 $f >> $LOG_FILE
done
}
function add_checks ()
{
LOG_FILE=$1
echo 'lsof -Pni' >> $LOG_FILE 2>&1
lsof -Pni >> $LOG_FILE 2>&1
echo 'iptables-save' >> $LOG_FILE 2>&1
iptables-save >> $LOG_FILE 2>&1
for f in /tmp/playbook-* ; do echo $f ; cat $f; echo; done >> $LOG_FILE 2>&1
echo 'slapos node status' >> $LOG_FILE 2>&1
slapos node status >> $LOG_FILE 2>&1
}
function upload ()
{
try=$1
LOG_FILE=$2
add_log $LOG_FILE
add_checks $LOG_FILE
  t=`date '+%Y%m%d%H%M%S'`
mv $LOG_FILE ${LOG_FILE}.$t
curl -q -X POST --data-urlencode "path=test-script-result/log-file.log.$t" --data-urlencode "content@${LOG_FILE}.$t" http://10.0.2.100/
}
try=1
while true; do
echo "$0: Try $try. Running '/bin/bash $DEPLOYMENT_SCRIPT'" >> $LOG_FILE 2>&1
export TEST_YML_PATH
/bin/bash $DEPLOYMENT_SCRIPT >> $LOG_FILE 2>&1
result=$?
if [ $result == 0 ] ; then
echo "$0: Try $try. Script executed successfully." >> $LOG_FILE 2>&1
upload $try $LOG_FILE
break
fi
if (( try > TRIES )) ; then
echo "$0: Try $try. Amount of tries $TRIES exceeded, giving up." >> $LOG_FILE 2>&1
upload $try $LOG_FILE
break
fi
# wait WAITTIME before checking the state
echo "$0: Try $try. Sleeping $WAITTIME before retry." >> $LOG_FILE 2>&1
upload $try $LOG_FILE
sleep $WAITTIME
((try++))
done
exit $result
instance-deploy-test.cfg.jinja2 0000664 0000000 0000000 00000003572 14241130220 0040342 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test {# Get test type, default to script-from-url, as defined in instance-input-schema.json -#}
{% set test_type = slapparameter_dict.get('test-type', 'script-from-url') -%}
{# Choose parameters according to test type -#}
{% if test_type == 'script-from-url' -%}
{% set script_url = slapparameter_dict.get('script-to-test-url') -%}
{% set test_yml_path = '/not/required' -%}
{% elif test_type == 'cloned-playbook' -%}
{% set script_url = 'http://10.0.2.100/standalone-local-playbook' -%}
{% set test_yml_path = slapparameter_dict.get('yml-path-to-test') -%}
{% else -%}
The test_type = "{{ test_type }}" is unsupported.
{% endif -%}
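{# Example (illustrative): requesting with test-type=cloned-playbook and
   yml-path-to-test=site.yml makes the VM fetch the standalone playbook from
   the KVM http server and receive site.yml as TEST_YML_PATH. -#}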
[buildout]
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
parts =
request-kvm
[request-kvm]
<= slap-connection
recipe = slapos.cookbook:request
software-url = ${slap-connection:software-release-url}
software-type = kvm
name = test-kvm
sla-computer_guid = ${slap-connection:computer-id}
# Tested image
# Passed by request
config-virtual-hard-drive-url = {{ slapparameter_dict.get('image-to-test-url') }}
config-virtual-hard-drive-md5sum = {{ slapparameter_dict.get('image-to-test-md5sum') }}
# The test script
config-bootstrap-script-url = {{ in_vm_test_script }}#{{ in_vm_test_script_md5 }}
# Script configuration
config-data-to-vm =
URL={{ script_url }}
WAITTIME={{ waittime }}
TRIES={{ tries }}
TEST_YML_PATH={{ test_yml_path }}
# require HTTP server
config-enable-http-server = true
# VM options
config-ram-size = 4096
config-cpu-count = 2
# wait for the system to be ready
return =
url
backend-url
ipv6
nat-rule-port-tcp-443
nat-rule-port-tcp-80
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc/
var = ${:home}/var/
srv = ${:home}/srv/
bin = ${:home}/bin/
tmp = ${:home}/tmp/
log = ${:var}/log/
services = ${:etc}/service/
scripts = ${:etc}/run/
instance-input-schema.json 0000664 0000000 0000000 00000003234 14241130220 0037517 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test {
"type": "object",
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Input Parameters",
"properties": {
"image-to-test-url": {
"title": "Image To Test (URL)",
"description": "Absolute URL to QCOW2 vm-bootstrap compatible image.",
"type": "string"
},
"image-to-test-md5sum": {
"title": "MD5 checksum of Image To Test.",
"description": "MD5 checksum of QCOW2 vm-bootstrap compatible image.",
"type": "string"
},
"test-relative-directory": {
"title": "Relative directory where tests are found.",
"description": "The base of this directory is [slapos-package].",
"type": "string"
},
"test-type": {
"title": "Type of the test.",
"type": "string",
"default": "script-from-url",
"enum": [
"script-from-url",
"cloned-playbook"
]
},
"script-to-test-url": {
"title": "Optional URL of script to test, used for test-type=script-from-url.",
"description": "Optional URL of script to test, which will be injected into the VM.\nThis parameter will NOT use local git clone of [slapos-package] part, it will test fully provided script.",
"type": "string"
},
"yml-path-to-test": {
"title": "Optional YML path to test from the playbook, used for test-type=cloned-playbook.",
"description": "Optional YML path to test from the playbook, provided as relative path to playbook root.\nThis parameter will use local git clone of [slapos-package] part, from which playbook will be constructed.",
"type": "string"
}
},
"required": [
"image-to-test-url",
"image-to-test-md5sum",
"test-type"
]
}
instance.cfg.in 0000664 0000000 0000000 00000004726 14241130220 0035326 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test [buildout]
extends = ${template:output}
parts +=
runTestSuite
copy-test-data
[copy-test-data]
# Note: This is a hack; while requesting the KVM it is not possible
#       to correctly fill its http directory.
# Note: This will only work inside of a non-user related environment.
#       The dest shell variable will evaluate to the requested partition's
#       srv/public directory.
#       The "if" statement makes this script run only in the partition
#       *without* srv/public - so, according to current knowledge, the
#       one which requests the KVM backend for testing.
recipe = plone.recipe.command
stop-on-error = true
update-command = $${:command}
command =
if [ ! -d srv/public ] ; then
dest=`echo ../*/srv/public`
cp ${playbook:output} $dest/playbook.tar.gz &&
cp ${standalone-local-playbook:location} $dest/standalone-local-playbook
fi
[runTestSuite]
recipe = slapos.recipe.template:jinja2
rendered = $${buildout:directory}/bin/$${:_buildout_section_name_}
template = inline:
#!/bin/sh
export PATH=${python2.7-with-eggs:location}:$PATH
exec ${buildout:bin-directory}/${runTestSuite_py:interpreter} ${:_profile_base_location_}/runTestSuite.py --partition_ipv4 {{ list(partition_ipv4)[0] }} --partition_path $${buildout:directory} --test_reference "{{ slapparameter_dict.get('image-to-test-url') }} {{ slapparameter_dict.get('script-to-test-url')}}" --test_location "${test-location:base}/{{ slapparameter_dict.get('test-relative-directory', 'playbook/roles/standalone-shared') }}" --python_interpreter=${buildout:bin-directory}/${runTestSuite_py:interpreter} "$@"
mode = 0755
context =
key slapparameter_dict slap-configuration:configuration
key partition_ipv4 slap-configuration:ipv4
[switch_softwaretype]
default = $${:deploy-test}
deploy-test = dynamic-template-deploy-test:rendered
[dynamic-template-deploy-test]
recipe = slapos.recipe.template:jinja2
template = ${:_profile_base_location_}/instance-deploy-test.cfg.jinja2
rendered = $${buildout:directory}/template-deploy-test.cfg
context =
key develop_eggs_directory buildout:develop-eggs-directory
key eggs_directory buildout:eggs-directory
key slapparameter_dict slap-configuration:configuration
raw bin_directory ${buildout:bin-directory}
raw in_vm_test_script ${deploy-script-controller-script:location}
raw in_vm_test_script_md5 ${deploy-script-controller-script:md5sum}
raw waittime ${deploy-script-controller-script:waittime}
raw tries ${deploy-script-controller-script:tries}
mode = 0644
runTestSuite.py 0000664 0000000 0000000 00000015174 14241130220 0035463 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test from __future__ import print_function
import argparse
import os
import glob
from time import gmtime, strftime, time, sleep
from erp5.util import taskdistribution, testsuite
import logging
import sys
import tempfile
import json
SLEEP_TIME = 15
TRY_AMOUNT = 3600
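# Polling budget: TRY_AMOUNT tries * SLEEP_TIME seconds = 3600 * 15 s,
# i.e. up to 15 hours waiting for the in-VM test to post its result.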
def waitForSite(partition_path):
status_dict = {'command': 'file not found'}
  # find test result, wait up to 15h (see polling budget above)
try_num = 1
start = time()
result_found = False
while 1:
finished = False
try_info = 'Try %s/%s: ' % (try_num, TRY_AMOUNT)
test_result_glob = os.path.join(
partition_path,
'..',
'*',
'srv',
'public',
'test-script-result',
)
print(try_info + 'Waiting for data in %r.' % (test_result_glob,))
result_list = glob.glob(test_result_glob)
if len(result_list) > 0:
result_path = result_list[0]
print(try_info + 'Data directory %r found, looking for results.' % (
result_path,))
result_file_list = list((
os.path.join(dirname, filename)
for dirname, dirnames, filenames in os.walk(result_path)
for filename in filenames
))
      if not result_file_list:
        print(try_info + 'No result posted, will check next try.')
for result_file in result_file_list:
print(try_info + 'Data found.')
result_found = True
result_file = os.path.abspath(result_file)
status_dict['command'] = result_file
result = open(result_file).read()
# remove result, as it is not required anymore
os.unlink(result_file)
print(try_info + 'Analysis of result %r:' % (result_file,))
print(try_info + result)
status_dict['stderr'] = 'Last result:\n%s' % (result,)
if 'FATAL: all hosts have already failed -- aborting' in result:
# failed
status_dict.update(
success=False
)
finished = False
status_dict['stdout'] = try_info + 'Build not yet successful.'
print(try_info + '%r: Found not yet finished run.' % (result_file,))
elif "Build successful, connect to:" in result:
# success
status_dict.update(
success=True
)
finished = True
print(try_info + '%r: Found finished successful run.' % (
result_file,))
status_dict['stdout'] = try_info + 'Build successful.'
break
else:
# unknown
status_dict.update(
success=False
)
status_dict['stdout'] = \
try_info + 'Cannot find success nor failure result in the output'
print(try_info + '%r: Found unknown run.' % (result_file,))
finished = False
if finished:
break
if try_num >= TRY_AMOUNT:
msg = try_info + 'Time exceeded, success not found.'
print(msg)
status_dict.setdefault('stdout', '')
status_dict['stdout'] = '\n'.join([status_dict['stdout'], msg])
break
try_num += 1
print(try_info + 'Sleeping for %ss.' % (SLEEP_TIME,))
sleep(SLEEP_TIME)
if not result_found:
status_dict['stdout'] = try_info + 'Test timed out and no result found.'
status_dict.update(
success=False
)
end = time()
status_dict.update(
date=strftime("%Y/%m/%d %H:%M:%S", gmtime(end)),
duration=end - start,
)
print(try_info + 'status_dict %r' % (status_dict,))
return status_dict
def main():
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(description='Run a test suite.')
parser.add_argument('--test_suite', help='The test suite name')
parser.add_argument('--test_suite_title', help='The test suite title')
parser.add_argument('--test_node_title', help='The test node title')
parser.add_argument('--project_title', help='The project title')
parser.add_argument('--revision', help='The revision to test',
default='dummy_revision')
parser.add_argument('--node_quantity', type=int,
help='Number of CPUs to use for the VM')
parser.add_argument('--master_url',
help='The Url of Master controlling test suites')
# SlapOS and deploy test specific
parser.add_argument(
'--partition_path',
help="Path of a partition",
default=os.path.abspath(os.getcwd()))
parser.add_argument(
'--test_reference',
help="Reference of the test",
default="missing"
)
parser.add_argument(
'--partition_ipv4',
help="IPv4 of a partition"
)
parser.add_argument(
'--test_location',
help="Location of the tests"
)
parser.add_argument(
'--python_interpreter',
help="Path to python interpreter used to run the test suite"
)
args = parser.parse_args()
revision = args.revision
test_suite_title = args.test_suite_title or args.test_suite
  # TODO: rewrite this using nxdtest; EggTestSuite no longer exists in erp5.util
suite = testsuite.EggTestSuite(
1, test_suite=args.test_suite, node_quantity=args.node_quantity,
python_interpreter=args.python_interpreter,
shared_part_list=os.environ.get('SLAPOS_TEST_SHARED_PART_LIST', ''),
log_directory=os.environ.get('SLAPOS_TEST_LOG_DIRECTORY', ''),
egg_test_path_dict={
os.path.basename(os.path.normpath(path)): path
for path in args.test_location.split(',')},
revision=revision)
access_url_http = None
access_url_https = None
if args.partition_ipv4:
access_url_http = 'http://%s:10080' % (args.partition_ipv4,)
access_url_https = 'https://%s:10443' % (args.partition_ipv4,)
os.environ['TEST_ACCESS_URL_HTTP'] = access_url_http
os.environ['TEST_ACCESS_URL_HTTPS'] = access_url_https
distributor = taskdistribution.TaskDistributor(
args.master_url,
logger=logger)
test_result = distributor.createTestResult(
revision, suite.getTestList(), args.test_node_title,
suite.allow_restart, test_suite_title, args.project_title)
if test_result is None:
return
# Create the site
status_dict = waitForSite(args.partition_path)
status_file = tempfile.NamedTemporaryFile()
status_file.write(json.dumps(status_dict))
status_file.flush()
os.fsync(status_file.fileno())
os.environ['TEST_SITE_STATUS_JSON'] = status_file.name
assert revision == test_result.revision, (revision, test_result.revision)
while suite.acquire():
test = test_result.start(suite.running.keys())
if test is not None:
suite.start(test.name, lambda status_dict,
__test=test: __test.stop(**status_dict))
elif not suite.running:
break
return
if __name__ == "__main__":
main()
software.cfg 0000664 0000000 0000000 00000003644 14241130220 0034745 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test [buildout]
extends =
buildout.hash.cfg
https://lab.nexedi.com/nexedi/slapos/raw/1.0.232/software/kvm/software.cfg
parts =
  python2.7-with-eggs
template-deploy-test
runTestSuite_py
playbook
[runTestSuite_py]
recipe = zc.recipe.egg
eggs =
erp5.util
interpreter = ${:_buildout_section_name_}
[python2.7-with-eggs]
# create interpreters named "python" and "python2.7" so that
# the instance profile can use them in $PATH
recipe = plone.recipe.command
location = ${buildout:parts-directory}/${:_buildout_section_name_}
stop-on-error = true
command =
rm -fr ${:location} &&
mkdir -p ${:location} &&
  ln -s ${buildout:bin-directory}/${runTestSuite_py:interpreter} ${:location}/python &&
  ln -s ${buildout:bin-directory}/${runTestSuite_py:interpreter} ${:location}/python2.7
update-command = ${:command}
[playbook]
recipe = plone.recipe.command
stop-on-error = true
environment = export PATH=${tar:location}/bin:${gzip:location}/bin:$PATH
location = ${buildout:parts-directory}/${:_buildout_section_name_}
output = ${:location}/playbook.tar.gz
command =
${:environment}
rm -fr ${:location}
mkdir -p ${:location}
cd ${slapos-package:location}/playbook
tar czf ${:output} .
update-command = ${:command}
[test-location]
base = ${slapos-package:location}
[slapos-package]
recipe = slapos.recipe.build:gitclone
repository = https://lab.nexedi.com/nexedi/slapos.package.git
branch = cdn-test
git-executable = ${git:location}/bin/git
[template]
output = ${buildout:directory}/template-original.kvm.cfg
[deploy-script-controller-script]
location = ${:_profile_base_location_}/${:filename}
# configuration
waittime = 360
tries = 80
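# With these defaults the in-VM controller retries for up to
# waittime * tries = 360 s * 80 = 8 hours before giving up.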
[standalone-local-playbook]
filename = standalone-local-playbook
location = ${:_profile_base_location_}/${:filename}
[template-deploy-test]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/template.cfg
mode = 0644
[versions]
erp5.util = 0.4.69
software.cfg.json 0000664 0000000 0000000 00000000452 14241130220 0035707 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test {
"name": "Deploy Test",
"description": "Deploy Testing software release",
"serialisation": "xml",
"software-type": {
"default": {
"title": "Default",
"description": "Standard ERP5TestNode hooked",
"request": "instance-input-schema.json",
"index": 0
}
}
}
standalone-local-playbook 0000664 0000000 0000000 00000210441 14241130220 0037406 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/deploy-test #!/bin/bash
set -e
#
# Paste content of function-common
# https://lab.nexedi.com/nexedi/slapos.package/blob/master/playbook/roles/install-script/files/function-common
#
#!/bin/bash
#
# functions-common - Common functions used by DevStack components
#
# The canonical copy of this file is maintained in the DevStack repo.
# All modifications should be made there and then sync'ed to other repos
# as required.
#
# This file is sorted alphabetically within the function groups.
#
# - Config Functions
# - Control Functions
# - Distro Functions
# - Git Functions
# - OpenStack Functions
# - Package Functions
# - Process Functions
# - Service Functions
# - System Functions
#
# The following variables are assumed to be defined by certain functions:
#
# - ``ENABLED_SERVICES``
# - ``ERROR_ON_CLONE``
# - ``FILES``
# - ``OFFLINE``
# - ``RECLONE``
# - ``REQUIREMENTS_DIR``
# - ``STACK_USER``
# - ``TRACK_DEPENDS``
# - ``UNDO_REQUIREMENTS``
# - ``http_proxy``, ``https_proxy``, ``no_proxy``
#
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Global Config Variables
declare -A GITREPO
declare -A GITBRANCH
declare -A GITDIR
TRACK_DEPENDS=${TRACK_DEPENDS:-False}
# Normalize config values to True or False
# Accepts as False: 0 no No NO false False FALSE
# Accepts as True: 1 yes Yes YES true True TRUE
# VAR=$(trueorfalse default-value test-value)
function trueorfalse {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local default=$1
local literal=$2
local testval=${!literal:-}
[[ -z "$testval" ]] && { echo "$default"; return; }
[[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
[[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
echo "$default"
$xtrace
}
#function isset {
# [[ -v "$1" ]]
#}
#
# Control Functions
# =================
# Prints backtrace info
# filename:lineno:function
# backtrace level
function backtrace {
local level=$1
local deep=$((${#BASH_SOURCE[@]} - 1))
echo "[Call Trace]"
while [ $level -le $deep ]; do
echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
deep=$((deep - 1))
done
}
# Prints line number and "message" then exits
# die $LINENO "message"
function die {
local exitcode=$?
set +o xtrace
local line=$1; shift
if [ $exitcode == 0 ]; then
exitcode=1
fi
backtrace 2
err $line "$*"
# Give buffers a second to flush
sleep 1
exit $exitcode
}
# Checks an environment variable is not set or has length 0 OR if the
# exit code is non-zero and prints "message" and exits
# NOTE: env-var is the variable name without a '$'
# die_if_not_set $LINENO env-var "message"
function die_if_not_set {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
if ! is_set $evar || [ $exitcode != 0 ]; then
die $line "$*"
fi
$xtrace
}
# Prints line number and "message" in error format
# err $LINENO "message"
function err {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
echo $msg 1>&2;
if [[ -n ${LOGDIR} ]]; then
echo $msg >> "${LOGDIR}/error.log"
fi
$xtrace
return $exitcode
}
# Checks an environment variable is not set or has length 0 OR if the
# exit code is non-zero and prints "message"
# NOTE: env-var is the variable name without a '$'
# err_if_not_set $LINENO env-var "message"
function err_if_not_set {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
if ! is_set $evar || [ $exitcode != 0 ]; then
err $line "$*"
fi
$xtrace
return $exitcode
}
# Exit after outputting a message about the distribution not being supported.
# exit_distro_not_supported [optional-string-telling-what-is-missing]
function exit_distro_not_supported {
if [[ -z "$DISTRO" ]]; then
GetDistro
fi
if [ $# -gt 0 ]; then
die $LINENO "Support for $DISTRO is incomplete: no support for $@"
else
die $LINENO "Support for $DISTRO is incomplete."
fi
}
# Test if the named environment variable is set and not zero length
# is_set env-var
function is_set {
local var=\$"$1"
eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this
}
# Prints line number and "message" in warning format
# warn $LINENO "message"
function warn {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
echo $msg 1>&2;
if [[ -n ${LOGDIR} ]]; then
echo $msg >> "${LOGDIR}/error.log"
fi
$xtrace
return $exitcode
}
# Distro Functions
# ================
# Determine OS Vendor, Release and Update
# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
# Returns results in global variables:
# ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
# ``os_RELEASE`` - major release: ``14.04`` (Ubuntu), ``20`` (Fedora)
# ``os_UPDATE`` - update: ex. the ``5`` in ``RHEL6.5``
# ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
# ``os_CODENAME`` - vendor's codename for release: ``snow leopard``, ``trusty``
os_VENDOR=""
os_RELEASE=""
os_UPDATE=""
os_PACKAGE=""
os_CODENAME=""
# GetOSVersion
function GetOSVersion {
# Figure out which vendor we are
if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
# OS/X
os_VENDOR=`sw_vers -productName`
os_RELEASE=`sw_vers -productVersion`
os_UPDATE=${os_RELEASE##*.}
os_RELEASE=${os_RELEASE%.*}
os_PACKAGE=""
if [[ "$os_RELEASE" =~ "10.7" ]]; then
os_CODENAME="lion"
elif [[ "$os_RELEASE" =~ "10.6" ]]; then
os_CODENAME="snow leopard"
elif [[ "$os_RELEASE" =~ "10.5" ]]; then
os_CODENAME="leopard"
elif [[ "$os_RELEASE" =~ "10.4" ]]; then
os_CODENAME="tiger"
elif [[ "$os_RELEASE" =~ "10.3" ]]; then
os_CODENAME="panther"
else
os_CODENAME=""
fi
elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
os_VENDOR=$(lsb_release -i -s)
os_RELEASE=$(lsb_release -r -s)
os_UPDATE=""
os_PACKAGE="rpm"
if [[ "Debian,Ubuntu,LinuxMint,Raspbian" =~ $os_VENDOR ]]; then
os_PACKAGE="deb"
elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
lsb_release -d -s | grep -q openSUSE
if [[ $? -eq 0 ]]; then
os_VENDOR="openSUSE"
fi
elif [[ $os_VENDOR == "openSUSE project" ]]; then
os_VENDOR="openSUSE"
elif [[ $os_VENDOR =~ Red.*Hat ]]; then
os_VENDOR="Red Hat"
fi
os_CODENAME=$(lsb_release -c -s)
elif [[ -r /etc/redhat-release ]]; then
# Red Hat Enterprise Linux Server release 5.5 (Tikanga)
# Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
# CentOS release 5.5 (Final)
# CentOS Linux release 6.0 (Final)
# Fedora release 16 (Verne)
# XenServer release 6.2.0-70446c (xenenterprise)
# Oracle Linux release 7
os_CODENAME=""
for r in "Red Hat" CentOS Fedora XenServer; do
os_VENDOR=$r
if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
os_CODENAME=${ver#*|}
os_RELEASE=${ver%|*}
os_UPDATE=${os_RELEASE##*.}
os_RELEASE=${os_RELEASE%.*}
break
fi
os_VENDOR=""
done
if [ "$os_VENDOR" = "Red Hat" ] && [[ -r /etc/oracle-release ]]; then
os_VENDOR=OracleLinux
fi
os_PACKAGE="rpm"
elif [[ -r /etc/SuSE-release ]]; then
for r in openSUSE "SUSE Linux"; do
if [[ "$r" = "SUSE Linux" ]]; then
os_VENDOR="SUSE LINUX"
else
os_VENDOR=$r
fi
if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
break
fi
os_VENDOR=""
done
os_PACKAGE="rpm"
# If lsb_release is not installed, we should be able to detect Debian OS
elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
os_VENDOR="Debian"
os_PACKAGE="deb"
os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
fi
export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
}
# Translate the OS version values into common nomenclature
# Sets global ``DISTRO`` from the ``os_*`` values
declare DISTRO
function GetDistro {
GetOSVersion
if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
# 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
DISTRO=$os_CODENAME
elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
# For Fedora, just use 'f' and the release
DISTRO="f$os_RELEASE"
elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
DISTRO="opensuse-$os_RELEASE"
elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
# For SLE, also use the service pack
if [[ -z "$os_UPDATE" ]]; then
DISTRO="sle${os_RELEASE}"
else
DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
fi
elif [[ "$os_VENDOR" =~ (Red Hat) || \
"$os_VENDOR" =~ (CentOS) || \
"$os_VENDOR" =~ (OracleLinux) ]]; then
# Drop the . release as we assume it's compatible
DISTRO="rhel${os_RELEASE::1}"
elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
DISTRO="xs$os_RELEASE"
else
# Catch-all for now is Vendor + Release + Update
DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
fi
export DISTRO
}
# Utility function for checking machine architecture
# is_arch arch-type
function is_arch {
[[ "$(uname -m)" == "$1" ]]
}
# Determine if current distribution is an Oracle distribution
# is_oraclelinux
function is_oraclelinux {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "OracleLinux" ]
}
# Determine if current distribution is a Fedora-based distribution
# (Fedora, RHEL, CentOS, etc).
# is_fedora
function is_fedora {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ]
}
# Determine if current distribution is a SUSE-based distribution
# (openSUSE, SLE).
# is_suse
function is_suse {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
}
# Determine if current distribution is an Ubuntu-based distribution
# It will also detect non-Ubuntu but Debian-based distros
# is_ubuntu
function is_ubuntu {
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
[ "$os_PACKAGE" = "deb" ]
}
# Git Functions
# =============
# Returns openstack release name for a given branch name
# ``get_release_name_from_branch branch-name``
function get_release_name_from_branch {
local branch=$1
if [[ $branch =~ "stable/" || $branch =~ "proposed/" ]]; then
echo ${branch#*/}
else
echo "master"
fi
}
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
# Set global ``RECLONE=yes`` to simulate a clone when dest-dir exists
# Set global ``ERROR_ON_CLONE=True`` to abort execution with an error if the git repo
# does not exist (default is False, meaning the repo will be cloned).
# Uses globals ``ERROR_ON_CLONE``, ``OFFLINE``, ``RECLONE``
# git_clone remote dest-dir branch
function git_clone {
local git_remote=$1
local git_dest=$2
local git_ref=$3
local orig_dir=$(pwd)
local git_clone_flags=""
RECLONE=$(trueorfalse False RECLONE)
if [[ "${GIT_DEPTH}" -gt 0 ]]; then
git_clone_flags="$git_clone_flags --depth $GIT_DEPTH"
fi
if [[ "$OFFLINE" = "True" ]]; then
echo "Running in offline mode, clones already exist"
# print out the results so we know what change was used in the logs
cd $git_dest
git show --oneline | head -1
cd $orig_dir
return
fi
if echo $git_ref | egrep -q "^refs"; then
# If our branch name is a gerrit style refs/changes/...
if [[ ! -d $git_dest ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
git_timed clone $git_clone_flags $git_remote $git_dest
fi
cd $git_dest
git_timed fetch $git_remote $git_ref && git checkout FETCH_HEAD
else
# do a full clone only if the directory doesn't exist
if [[ ! -d $git_dest ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
git_timed clone $git_clone_flags $git_remote $git_dest
cd $git_dest
# This checkout syntax works for both branches and tags
git checkout $git_ref
elif [[ "$RECLONE" = "True" ]]; then
# if it does exist then simulate what clone does if asked to RECLONE
cd $git_dest
# set the url to pull from and fetch
git remote set-url origin $git_remote
git_timed fetch origin
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
find $git_dest -name '*.pyc' -delete
# handle git_ref accordingly to type (tag, branch)
if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then
git_update_tag $git_ref
elif [[ -n "`git show-ref refs/heads/$git_ref`" ]]; then
git_update_branch $git_ref
elif [[ -n "`git show-ref refs/remotes/origin/$git_ref`" ]]; then
git_update_remote_branch $git_ref
else
die $LINENO "$git_ref is neither branch nor tag"
fi
fi
fi
# print out the results so we know what change was used in the logs
cd $git_dest
git show --oneline | head -1
cd $orig_dir
}
# A variation on git clone that lets us specify a project by its
# actual name, like oslo.config. This is exceptionally useful in the
# library installation case
function git_clone_by_name {
local name=$1
local repo=${GITREPO[$name]}
local dir=${GITDIR[$name]}
local branch=${GITBRANCH[$name]}
git_clone $repo $dir $branch
}
# git can sometimes get itself infinitely stuck with transient network
# errors or other issues with the remote end. This wraps git in a
# timeout/retry loop and is intended to watch over non-local git
# processes that might hang. GIT_TIMEOUT, if set, is passed directly
# to timeout(1); otherwise the default value of 0 maintains the status
# quo of waiting forever.
# usage: git_timed <git-command>
function git_timed {
local count=0
local timeout=0
if [[ -n "${GIT_TIMEOUT}" ]]; then
timeout=${GIT_TIMEOUT}
fi
until timeout -s SIGINT ${timeout} git "$@"; do
# 124 is timeout(1)'s special return code when it reached the
# timeout; otherwise assume fatal failure
if [[ $? -ne 124 ]]; then
die $LINENO "git call failed: [git $@]"
fi
count=$(($count + 1))
warn "timeout ${count} for git call: [git $@]"
if [ $count -eq 3 ]; then
die $LINENO "Maximum of 3 git retries reached"
fi
sleep 5
done
}
# git update using reference as a branch.
# git_update_branch ref
function git_update_branch {
local git_branch=$1
git checkout -f origin/$git_branch
# a local branch might not exist
git branch -D $git_branch || true
git checkout -b $git_branch
}
# git update using reference as a branch.
# git_update_remote_branch ref
function git_update_remote_branch {
local git_branch=$1
git checkout -b $git_branch -t origin/$git_branch
}
# git update using reference as a tag. Be careful editing source at that repo
# as working copy will be in a detached mode
# git_update_tag ref
function git_update_tag {
local git_tag=$1
git tag -d $git_tag
# fetching given tag only
git_timed fetch origin tag $git_tag
git checkout -f $git_tag
}
# OpenStack Functions
# ===================
# Get the default value for HOST_IP
# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
function get_default_host_ip {
local fixed_range=$1
local floating_range=$2
local host_ip_iface=$3
local host_ip=$4
# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
host_ip=""
# Find the interface used for the default route
host_ip_iface=${host_ip_iface:-$(ip route | awk '/default/ {print $5}' | head -1)}
local host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}')
local ip
for ip in $host_ips; do
# Attempt to filter out IP addresses that are part of the fixed and
# floating range. Note that this method only works if the ``netaddr``
# python library is installed. If it is not installed, an error
# will be printed and the first IP from the interface will be used.
# If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
# address.
if ! (address_in_net $ip $fixed_range || address_in_net $ip $floating_range); then
host_ip=$ip
break;
fi
done
fi
echo $host_ip
}
# Generates hex string from ``size`` byte of pseudo random data
# generate_hex_string size
function generate_hex_string {
local size=$1
hexdump -n "$size" -v -e '/1 "%02x"' /dev/urandom
}
# Grab a numbered field from python prettytable output
# Fields are numbered starting with 1
# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
# get_field field-number
function get_field {
local data field
while read data; do
if [ "$1" -lt 0 ]; then
field="(\$(NF$1))"
else
field="\$$(($1 + 1))"
fi
echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
done
}
# install default policy
# copy over a default policy.json and policy.d for projects
function install_default_policy {
local project=$1
local project_uc=$(echo $1|tr a-z A-Z)
local conf_dir="${project_uc}_CONF_DIR"
# eval conf dir to get the variable
conf_dir="${!conf_dir}"
local project_dir="${project_uc}_DIR"
# eval project dir to get the variable
project_dir="${!project_dir}"
local sample_conf_dir="${project_dir}/etc/${project}"
local sample_policy_dir="${project_dir}/etc/${project}/policy.d"
# first copy any policy.json
cp -p $sample_conf_dir/policy.json $conf_dir
# then optionally copy over policy.d
if [[ -d $sample_policy_dir ]]; then
cp -r $sample_policy_dir $conf_dir/policy.d
fi
}
# Add a policy to a policy.json file
# Do nothing if the policy already exists
# ``policy_add policy_file policy_name policy_permissions``
function policy_add {
local policy_file=$1
local policy_name=$2
local policy_perm=$3
if grep -q ${policy_name} ${policy_file}; then
echo "Policy ${policy_name} already exists in ${policy_file}"
return
fi
# Add a terminating comma to policy lines without one
# Remove the closing '}' and all lines following to the end-of-file
local tmpfile=$(mktemp)
uniq ${policy_file} | sed -e '
s/]$/],/
/^[}]/,$d
' > ${tmpfile}
# Append policy and closing brace
echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile}
echo "}" >>${tmpfile}
mv ${tmpfile} ${policy_file}
}
# Gets or creates a domain
# Usage: get_or_create_domain <name> <description>
function get_or_create_domain {
local os_url="$KEYSTONE_SERVICE_URI_V3"
# Gets domain id
local domain_id=$(
# Gets domain id
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 domain show $1 \
-f value -c id 2>/dev/null ||
# Creates new domain
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 domain create $1 \
--description "$2" \
-f value -c id
)
echo $domain_id
}
# Gets or creates group
# Usage: get_or_create_group <groupname> [<domain> <description>]
function get_or_create_group {
local domain=${2:+--domain ${2}}
local desc="${3:-}"
local os_url="$KEYSTONE_SERVICE_URI_V3"
# Gets group id
local group_id=$(
# Creates new group with --or-show
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 group create $1 \
$domain --description "$desc" --or-show \
-f value -c id
)
echo $group_id
}
# Gets or creates user
# Usage: get_or_create_user <username> <password> [<email> [<domain>]]
function get_or_create_user {
if [[ ! -z "$3" ]]; then
local email="--email=$3"
else
local email=""
fi
local os_cmd="openstack"
local domain=""
if [[ ! -z "$4" ]]; then
domain="--domain=$4"
os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
fi
# Gets user id
local user_id=$(
# Creates new user with --or-show
$os_cmd user create \
$1 \
--password "$2" \
$email \
$domain \
--or-show \
-f value -c id
)
echo $user_id
}
# Gets or creates project
# Usage: get_or_create_project <name> [<domain>]
function get_or_create_project {
# Gets project id
local os_cmd="openstack"
local domain=""
if [[ ! -z "$2" ]]; then
domain="--domain=$2"
os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
fi
local project_id=$(
# Creates new project with --or-show
$os_cmd project create $1 $domain --or-show -f value -c id
)
echo $project_id
}
# Gets or creates role
# Usage: get_or_create_role <rolename>
function get_or_create_role {
local role_id=$(
# Creates role with --or-show
openstack role create $1 --or-show -f value -c id
)
echo $role_id
}
# Gets or adds user role to project
# Usage: get_or_add_user_project_role <role> <user> <project>
function get_or_add_user_project_role {
# Gets user role id
local user_role_id=$(openstack role list \
--user $2 \
--project $3 \
--column "ID" \
--column "Name" \
| grep " $1 " | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user
user_role_id=$(openstack role add \
$1 \
--user $2 \
--project $3 \
| grep " id " | get_field 2)
fi
echo $user_role_id
}
# Gets or adds group role to project
# Usage: get_or_add_group_project_role <role> <group> <project>
function get_or_add_group_project_role {
# Gets group role id
local group_role_id=$(openstack role list \
--group $2 \
--project $3 \
--column "ID" \
--column "Name" \
| grep " $1 " | get_field 1)
if [[ -z "$group_role_id" ]]; then
# Adds role to group
group_role_id=$(openstack role add \
$1 \
--group $2 \
--project $3 \
| grep " id " | get_field 2)
fi
echo $group_role_id
}
# Gets or creates service
# Usage: get_or_create_service <name> <type> <description>
function get_or_create_service {
# Gets service id
local service_id=$(
# Gets service id
openstack service show $1 -f value -c id 2>/dev/null ||
# Creates new service if not exists
openstack service create \
$2 \
--name $1 \
--description="$3" \
-f value -c id
)
echo $service_id
}
# Gets or creates endpoint
# Usage: get_or_create_endpoint <service> <region> <publicurl> <adminurl> <internalurl>
function get_or_create_endpoint {
# Gets endpoint id
local endpoint_id=$(openstack endpoint list \
--column "ID" \
--column "Region" \
--column "Service Name" \
| grep " $2 " \
| grep " $1 " | get_field 1)
if [[ -z "$endpoint_id" ]]; then
# Creates new endpoint
endpoint_id=$(openstack endpoint create \
$1 \
--region $2 \
--publicurl $3 \
--adminurl $4 \
--internalurl $5 \
| grep " id " | get_field 2)
fi
echo $endpoint_id
}
# Package Functions
# =================
# _get_package_dir
function _get_package_dir {
local base_dir=$1
local pkg_dir
if [[ -z "$base_dir" ]]; then
base_dir=$FILES
fi
if is_ubuntu; then
pkg_dir=$base_dir/debs
elif is_fedora; then
pkg_dir=$base_dir/rpms
elif is_suse; then
pkg_dir=$base_dir/rpms-suse
else
exit_distro_not_supported "list of packages"
fi
echo "$pkg_dir"
}
# Wrapper for ``apt-get`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``*_proxy``
# apt_get operation package [package ...]
function apt_get {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
$xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
}
function _parse_package_files {
local files_to_parse=$@
if [[ -z "$DISTRO" ]]; then
GetDistro
fi
for fname in ${files_to_parse}; do
local OIFS line package distros distro
[[ -e $fname ]] || continue
OIFS=$IFS
IFS=$'\n'
for line in $(<${fname}); do
if [[ $line =~ "NOPRIME" ]]; then
continue
fi
# Assume we want this package
package=${line%#*}
inst_pkg=1
# Look for # dist:xxx in comment
if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
# We are using BASH regexp matching feature.
package=${BASH_REMATCH[1]}
distros=${BASH_REMATCH[2]}
            # In bash ${VAR,,} will lowercase VAR
# Look for a match in the distro list
if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
# If no match then skip this package
inst_pkg=0
fi
fi
if [[ $inst_pkg = 1 ]]; then
echo $package
fi
done
IFS=$OIFS
done
}
# get_packages() collects a list of package names of any type from the
# prerequisite files in ``files/{debs|rpms}``. The list is intended
# to be passed to a package installer such as apt or yum.
#
# Only packages required for the services in 1st argument will be
# included. Two bits of metadata are recognized in the prerequisite files:
#
# - ``# NOPRIME`` defers installation to be performed later in `stack.sh`
# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
# of the package to the distros listed. The distro names are case insensitive.
function get_packages {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local services=$@
local package_dir=$(_get_package_dir)
local file_to_parse=""
local service=""
if [[ -z "$package_dir" ]]; then
echo "No package directory supplied"
return 1
fi
for service in ${services//,/ }; do
# Allow individual services to specify dependencies
if [[ -e ${package_dir}/${service} ]]; then
file_to_parse="${file_to_parse} ${package_dir}/${service}"
fi
# NOTE(sdague) n-api needs glance for now because that's where
# glance client is
if [[ $service == n-api ]]; then
if [[ ! $file_to_parse =~ $package_dir/nova ]]; then
file_to_parse="${file_to_parse} ${package_dir}/nova"
fi
if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
file_to_parse="${file_to_parse} ${package_dir}/glance"
fi
elif [[ $service == c-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then
file_to_parse="${file_to_parse} ${package_dir}/cinder"
fi
elif [[ $service == ceilometer-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/ceilometer ]]; then
file_to_parse="${file_to_parse} ${package_dir}/ceilometer"
fi
elif [[ $service == s-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/swift ]]; then
file_to_parse="${file_to_parse} ${package_dir}/swift"
fi
elif [[ $service == n-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/nova ]]; then
file_to_parse="${file_to_parse} ${package_dir}/nova"
fi
elif [[ $service == g-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
file_to_parse="${file_to_parse} ${package_dir}/glance"
fi
elif [[ $service == key* ]]; then
if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then
file_to_parse="${file_to_parse} ${package_dir}/keystone"
fi
elif [[ $service == q-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/neutron ]]; then
file_to_parse="${file_to_parse} ${package_dir}/neutron"
fi
elif [[ $service == ir-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then
file_to_parse="${file_to_parse} ${package_dir}/ironic"
fi
fi
done
echo "$(_parse_package_files $file_to_parse)"
$xtrace
}
# get_plugin_packages() collects a list of package names of any type from a
# plugin's prerequisite files in ``$PLUGIN/devstack/files/{debs|rpms}``. The
# list is intended to be passed to a package installer such as apt or yum.
#
# Only packages required for enabled and collected plugins will be included.
#
# The same metadata used in the main DevStack prerequisite files may be used
# in these prerequisite files, see get_packages() for more info.
function get_plugin_packages {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local files_to_parse=""
local package_dir=""
for plugin in ${DEVSTACK_PLUGINS//,/ }; do
local package_dir="$(_get_package_dir ${GITDIR[$plugin]}/devstack/files)"
files_to_parse+="$package_dir/$plugin"
done
echo "$(_parse_package_files $files_to_parse)"
$xtrace
}
# Distro-agnostic package installer
# Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE``
# install_package package [package ...]
function update_package_repo {
NO_UPDATE_REPOS=${NO_UPDATE_REPOS:-False}
REPOS_UPDATED=${REPOS_UPDATED:-False}
RETRY_UPDATE=${RETRY_UPDATE:-False}
if [[ "$NO_UPDATE_REPOS" = "True" ]]; then
return 0
fi
if is_ubuntu; then
local xtrace=$(set +o | grep xtrace)
set +o xtrace
if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then
# if there are transient errors pulling the updates, that's fine.
# It may be secondary repositories that we don't really care about.
apt_get update --allow-releaseinfo-change || /bin/true
apt_get update || /bin/true
REPOS_UPDATED=True
fi
$xtrace
fi
}
function real_install_package {
if is_ubuntu; then
apt_get install "$@"
elif is_fedora; then
yum_install "$@"
elif is_suse; then
zypper_install "$@"
else
exit_distro_not_supported "installing packages"
fi
}
# Distro-agnostic package installer
# install_package package [package ...]
function install_package {
update_package_repo
real_install_package "$@" || { RETRY_UPDATE=True update_package_repo && real_install_package "$@"; }
}
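# Illustrative call (package names are hypothetical):
#   install_package curl jq
# If the first real_install_package fails, the repository metadata is
# refreshed once with RETRY_UPDATE=True and the install is retried.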
# Distro-agnostic function to tell if a package is installed
# is_package_installed package [package ...]
function is_package_installed {
if [[ -z "$@" ]]; then
return 1
fi
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
if [[ "$os_PACKAGE" = "deb" ]]; then
dpkg -s "$@" > /dev/null 2> /dev/null
elif [[ "$os_PACKAGE" = "rpm" ]]; then
rpm --quiet -q "$@"
else
exit_distro_not_supported "finding if a package is installed"
fi
}
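# Illustrative call (package name is hypothetical); the exit status is the
# answer, so it composes directly with shell conditionals:
#   if is_package_installed curl; then echo "curl already present"; fi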
# Distro-agnostic package uninstaller
# uninstall_package package [package ...]
function uninstall_package {
if is_ubuntu; then
apt_get purge "$@"
elif is_fedora; then
sudo ${YUM:-yum} remove -y "$@" ||:
elif is_suse; then
sudo zypper rm "$@"
else
exit_distro_not_supported "uninstalling packages"
fi
}
# Wrapper for ``yum`` to set proxy environment variables
# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM``
# yum_install package [package ...]
function yum_install {
[[ "$OFFLINE" = "True" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# The manual check for missing packages is because yum -y assumes
# missing packages are OK. See
# https://bugzilla.redhat.com/show_bug.cgi?id=965567
$sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
${YUM:-yum} install -y "$@" 2>&1 | \
awk '
BEGIN { fail=0 }
/No package/ { fail=1 }
{ print }
END { exit fail }' || \
die $LINENO "Missing packages detected"
# also ensure we catch a yum failure
if [[ ${PIPESTATUS[0]} != 0 ]]; then
die $LINENO "${YUM:-yum} install failure"
fi
}
# zypper wrapper to set arguments correctly
# Uses globals ``OFFLINE``, ``*_proxy``
# zypper_install package [package ...]
function zypper_install {
[[ "$OFFLINE" = "True" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
$sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
zypper --non-interactive install --auto-agree-with-licenses "$@"
}
# Process Functions
# =================
# _run_process() is designed to be backgrounded by run_process() to simulate a
# fork. It includes the dirty work of closing extra filehandles and preparing log
# files to produce the same logs as screen_it(). The log filename is derived
# from the service name.
# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
# If an optional group is provided sg will be used to set the group of
# the command.
# _run_process service "command-line" [group]
function _run_process {
local service=$1
local command="$2"
local group=$3
# Undo logging redirections and close the extra descriptors
exec 1>&3
exec 2>&3
exec 3>&-
exec 6>&-
local real_logfile="${LOGDIR}/${service}.log.${CURRENT_LOG_TIME}"
if [[ -n ${LOGDIR} ]]; then
exec 1>&"$real_logfile" 2>&1
ln -sf "$real_logfile" ${LOGDIR}/${service}.log
if [[ -n ${SCREEN_LOGDIR} ]]; then
# Drop the backward-compat symlink
ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
fi
# TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
export PYTHONUNBUFFERED=1
fi
# Run under ``setsid`` to force the process to become a session and group leader.
# The pid saved can be used with pkill -g to get the entire process group.
if [[ -n "$group" ]]; then
setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
else
setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
fi
# Just silently exit this process
exit 0
}
# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
# This is used by ``service_check`` once all the ``screen_it`` calls have finished.
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
# init_service_check
function init_service_check {
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
fi
rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
}
# Find out if a process exists by partial name.
# is_running name
function is_running {
local name=$1
ps auxw | grep -v grep | grep ${name} > /dev/null
local exitcode=$?
# sometimes I really hate bash reverse binary logic
return $exitcode
}
# Run a single service under screen or directly
# If the command includes shell metacharacters (;<>*) it must be run using a shell
# If an optional group is provided sg will be used to run the
# command as that group.
# run_process service "command-line" [group]
function run_process {
local service=$1
local command="$2"
local group=$3
if is_service_enabled $service; then
if [[ "$USE_SCREEN" = "True" ]]; then
screen_process "$service" "$command" "$group"
else
# Spawn directly without screen
_run_process "$service" "$command" "$group" &
fi
fi
}
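# Illustrative call (service name and command are examples only):
#   run_process n-api "$NOVA_BIN_DIR/nova-api"
# With USE_SCREEN=True this goes through screen_process; otherwise the
# command is backgrounded directly via _run_process.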
# Helper to launch a process in a named screen
# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``,
# ``SERVICE_DIR``, ``USE_SCREEN``
# screen_process name "command-line" [group]
# Run a command in a shell in a screen window, if an optional group
# is provided, use sg to set the group of the command.
function screen_process {
local name=$1
local command="$2"
local group=$3
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True USE_SCREEN)
screen -S $SCREEN_NAME -X screen -t $name
local real_logfile="${LOGDIR}/${name}.log.${CURRENT_LOG_TIME}"
echo "LOGDIR: $LOGDIR"
echo "SCREEN_LOGDIR: $SCREEN_LOGDIR"
echo "log: $real_logfile"
if [[ -n ${LOGDIR} ]]; then
screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
screen -S $SCREEN_NAME -p $name -X log on
ln -sf "$real_logfile" ${LOGDIR}/${name}.log
if [[ -n ${SCREEN_LOGDIR} ]]; then
# Drop the backward-compat symlink
ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log
fi
fi
# Sleep to allow bash to be ready to receive the command - we are
# creating a new window in screen and then sending characters, so if
# bash isn't running by the time we send the command, nothing
# happens. This sleep was originally added to handle gate runs
# where it needed to be at least 3 seconds to pass consistently on
# slow clouds. Now it is configurable so that a much smaller value
# can be chosen for the local case.
sleep ${SCREEN_SLEEP:-3}
NL=`echo -ne '\015'`
# This fun command does the following:
# - the passed server command is backgrounded
# - the pid of the background process is saved in the usual place
# - the server process is brought back to the foreground
# - if the server process exits prematurely the fg command errors
# and a message is written to stdout and the process failure file
#
# The pid saved can be used in stop_process() as a process group
# id to kill off all child processes
if [[ -n "$group" ]]; then
command="sg $group '$command'"
fi
# Append the process to the screen rc file
screen_rc "$name" "$command"
screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL"
}
# Screen rc file builder
# Uses globals ``SCREEN_NAME``, ``SCREENRC``
# screen_rc service "command-line"
function screen_rc {
SCREEN_NAME=${SCREEN_NAME:-stack}
SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
if [[ ! -e $SCREENRC ]]; then
# Name the screen session
echo "sessionname $SCREEN_NAME" > $SCREENRC
# Set a reasonable statusbar
echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
# Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
echo "screen -t shell bash" >> $SCREENRC
fi
# If this service doesn't already exist in the screenrc file
if ! grep $1 $SCREENRC > /dev/null 2>&1; then
NL=`echo -ne '\015'`
echo "screen -t $1 bash" >> $SCREENRC
echo "stuff \"$2$NL\"" >> $SCREENRC
if [[ -n ${LOGDIR} ]]; then
echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC
echo "log on" >>$SCREENRC
fi
fi
}
# Stop a service in screen
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
# screen_stop_service service
function screen_stop_service {
local service=$1
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True USE_SCREEN)
if is_service_enabled $service; then
# Clean up the screen window
screen -S $SCREEN_NAME -p $service -X kill
fi
}
# Stop a service process
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# Uses globals ``SERVICE_DIR``, ``USE_SCREEN``
# stop_process service
function stop_process {
local service=$1
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True USE_SCREEN)
if is_service_enabled $service; then
# Kill via pid if we have one available
if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
fi
if [[ "$USE_SCREEN" = "True" ]]; then
# Clean up the screen window
screen_stop_service $service
fi
fi
}
# Helper to get the status of each running service
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
# service_check
function service_check {
local service
local failures
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
echo "No service status directory found"
return
fi
# Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
# make this -o errexit safe
failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true`
for service in $failures; do
service=`basename $service`
service=${service%.failure}
echo "Error: Service $service is not running"
done
if [ -n "$failures" ]; then
die $LINENO "More details about the above errors can be found with screen, with ./rejoin-stack.sh"
fi
}
# Tail a log file in a screen if USE_SCREEN is true.
function tail_log {
local name=$1
local logfile=$2
USE_SCREEN=$(trueorfalse True USE_SCREEN)
if [[ "$USE_SCREEN" = "True" ]]; then
screen_process "$name" "sudo tail -f $logfile"
fi
}
# Deprecated Functions
# --------------------
# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a
# fork. It includes the dirty work of closing extra filehandles and preparing log
# files to produce the same logs as screen_it(). The log filename is derived
# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
# _old_run_process service "command-line"
function _old_run_process {
local service=$1
local command="$2"
# Undo logging redirections and close the extra descriptors
exec 1>&3
exec 2>&3
exec 3>&-
exec 6>&-
if [[ -n ${SCREEN_LOGDIR} ]]; then
exec 1>&${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} 2>&1
ln -sf ${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} ${SCREEN_LOGDIR}/screen-${1}.log
# TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
export PYTHONUNBUFFERED=1
fi
exec /bin/bash -c "$command"
die "$service exec failure: $command"
}
# old_run_process() launches a child process that closes all file descriptors and
# then exec's the passed in command. This is meant to duplicate the semantics
# of screen_it() without screen. PIDs are written to
# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process.
# old_run_process service "command-line"
function old_run_process {
local service=$1
local command="$2"
# Spawn the child process
_old_run_process "$service" "$command" &
echo $!
}
# Compatibility for existing start_XXXX() functions
# Uses global ``USE_SCREEN``
# screen_it service "command-line"
function screen_it {
if is_service_enabled $1; then
# Append the service to the screen rc file
screen_rc "$1" "$2"
if [[ "$USE_SCREEN" = "True" ]]; then
screen_process "$1" "$2"
else
# Spawn directly without screen
old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
fi
fi
}
# Compatibility for existing stop_XXXX() functions
# Stop a service in screen
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# screen_stop service
function screen_stop {
# Clean up the screen window
stop_process $1
}
# Plugin Functions
# =================
DEVSTACK_PLUGINS=${DEVSTACK_PLUGINS:-""}
# enable_plugin name url [branch]
#
# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar)
# ``url`` is a git url
# ``branch`` is a gitref. If it's not set, defaults to master
function enable_plugin {
local name=$1
local url=$2
local branch=${3:-master}
DEVSTACK_PLUGINS+=",$name"
GITREPO[$name]=$url
GITDIR[$name]=$DEST/$name
GITBRANCH[$name]=$branch
}
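# Illustrative call (name, url and branch are examples only):
#   enable_plugin glusterfs https://example.com/glusterfs-plugin.git stable
# This appends "glusterfs" to DEVSTACK_PLUGINS and records its git
# repository, checkout directory and branch in the GIT* arrays.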
# fetch_plugins
#
# clones all plugins
function fetch_plugins {
local plugins="${DEVSTACK_PLUGINS}"
local plugin
# short circuit if nothing to do
if [[ -z $plugins ]]; then
return
fi
echo "Fetching DevStack plugins"
for plugin in ${plugins//,/ }; do
git_clone_by_name $plugin
done
}
# load_plugin_settings
#
# Load settings from plugins in the order that they were registered
function load_plugin_settings {
local plugins="${DEVSTACK_PLUGINS}"
local plugin
# short circuit if nothing to do
if [[ -z $plugins ]]; then
return
fi
echo "Loading plugin settings"
for plugin in ${plugins//,/ }; do
local dir=${GITDIR[$plugin]}
# source any known settings
if [[ -f $dir/devstack/settings ]]; then
source $dir/devstack/settings
fi
done
}
# plugin_override_defaults
#
# Run an extremely early setting phase for plugins that allows default
# overriding of services.
function plugin_override_defaults {
local plugins="${DEVSTACK_PLUGINS}"
local plugin
# short circuit if nothing to do
if [[ -z $plugins ]]; then
return
fi
echo "Overriding Configuration Defaults"
for plugin in ${plugins//,/ }; do
local dir=${GITDIR[$plugin]}
# source any overrides
if [[ -f $dir/devstack/override-defaults ]]; then
# be really verbose that an override is happening, as it
# may not be obvious if things fail later.
echo "$plugin has overriden the following defaults"
cat $dir/devstack/override-defaults
source $dir/devstack/override-defaults
fi
done
}
# run_plugins
#
# Run the devstack/plugin.sh in all the plugin directories. These are
# run in registration order.
function run_plugins {
local mode=$1
local phase=$2
local plugins="${DEVSTACK_PLUGINS}"
local plugin
for plugin in ${plugins//,/ }; do
local dir=${GITDIR[$plugin]}
if [[ -f $dir/devstack/plugin.sh ]]; then
source $dir/devstack/plugin.sh $mode $phase
fi
done
}
function run_phase {
local mode=$1
local phase=$2
if [[ -d $TOP_DIR/extras.d ]]; then
for i in $TOP_DIR/extras.d/*.sh; do
[[ -r $i ]] && source $i $mode $phase
done
fi
# the source phase corresponds to settings loading in plugins
if [[ "$mode" == "source" ]]; then
load_plugin_settings
elif [[ "$mode" == "override_defaults" ]]; then
plugin_override_defaults
else
run_plugins $mode $phase
fi
}
# Service Functions
# =================
# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
# _cleanup_service_list service-list
function _cleanup_service_list {
echo "$1" | sed -e '
s/,,/,/g;
s/^,//;
s/,$//
'
}
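# Example: _cleanup_service_list ",n-api,,n-cpu," prints "n-api,n-cpu".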
# disable_all_services() removes all current services
# from ``ENABLED_SERVICES`` to reset the configuration
# before a minimal installation
# Uses global ``ENABLED_SERVICES``
# disable_all_services
function disable_all_services {
ENABLED_SERVICES=""
}
# Remove all services starting with '-'. For example, to install all default
# services except rabbit, set the following in ``localrc``:
# ENABLED_SERVICES+=",-rabbit"
# Uses global ``ENABLED_SERVICES``
# disable_negated_services
function disable_negated_services {
local tmpsvcs="${ENABLED_SERVICES}"
local service
for service in ${tmpsvcs//,/ }; do
if [[ ${service} == -* ]]; then
tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
fi
done
ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
}
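# Worked example (hypothetical list): with ENABLED_SERVICES="n-api,rabbit,-rabbit",
# disable_negated_services strips both "-rabbit" and "rabbit", leaving
# ENABLED_SERVICES="n-api".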
# disable_service() removes the services passed as argument to the
# ``ENABLED_SERVICES`` list, if they are present.
#
# For example:
# disable_service rabbit
#
# This function does not know about the special cases
# for nova, glance, and neutron built into is_service_enabled().
# Uses global ``ENABLED_SERVICES``
# disable_service service [service ...]
function disable_service {
local tmpsvcs=",${ENABLED_SERVICES},"
local service
for service in $@; do
if is_service_enabled $service; then
tmpsvcs=${tmpsvcs//,$service,/,}
fi
done
ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
}
# enable_service() adds the services passed as argument to the
# ``ENABLED_SERVICES`` list, if they are not already present.
#
# For example:
# enable_service qpid
#
# This function does not know about the special cases
# for nova, glance, and neutron built into is_service_enabled().
# Uses global ``ENABLED_SERVICES``
# enable_service service [service ...]
function enable_service {
local tmpsvcs="${ENABLED_SERVICES}"
local service
for service in $@; do
if ! is_service_enabled $service; then
tmpsvcs+=",$service"
fi
done
ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
disable_negated_services
}
# is_service_enabled() checks if the service(s) specified as arguments are
# enabled by the user in ``ENABLED_SERVICES``.
#
# Multiple services specified as arguments are ``OR``'ed together; the test
# is a short-circuit boolean, i.e. it returns on the first match.
#
# There are special cases for some 'catch-all' services::
# **nova** returns true if any enabled service starts with **n-**
# **cinder** returns true if any enabled service starts with **c-**
# **ceilometer** returns true if any enabled service starts with **ceilometer**
# **glance** returns true if any enabled service starts with **g-**
# **neutron** returns true if any enabled service starts with **q-**
# **swift** returns true if any enabled service starts with **s-**
# **trove** returns true if any enabled service starts with **tr-**
# For backward compatibility, if **swift** is in ENABLED_SERVICES, all the
# **s-** services will be enabled. This will be deprecated in the future.
#
# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
# We also need to make sure to treat **n-cell-region** and **n-cell-child**
# as enabled in this case.
#
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local enabled=1
local services=$@
local service
for service in ${services}; do
[[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0
# Look for top-level 'enabled' function for this service
if type is_${service}_enabled >/dev/null 2>&1; then
# A function exists for this service, use it
is_${service}_enabled
enabled=$?
fi
# TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
# are implemented
[[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
[[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
[[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
[[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
[[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
[[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
[[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
[[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0
[[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
[[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
[[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
done
$xtrace
return $enabled
}
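# Illustrative checks (service names are examples):
#   is_service_enabled q-svc && echo "neutron server is enabled"
#   is_service_enabled nova   # also true when any n-* service is enabled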
# Toggle enable/disable_service for services that must run exclusive of each other
# $1 The name of a variable containing a space-separated list of services
# $2 The name of a variable in which to store the enabled service's name
# $3 The name of the service to enable
function use_exclusive_service {
local options=${!1}
local selection=$3
local out=$2
[ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1
local opt
for opt in $options;do
[[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt
done
eval "$out=$selection"
return 0
}
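# Illustrative call (variable and service names are hypothetical): with
#   MQ_OPTIONS="rabbit qpid zmq"; use_exclusive_service MQ_OPTIONS MQ_BACKEND rabbit
# "rabbit" is enabled, "qpid" and "zmq" are disabled, and MQ_BACKEND=rabbit.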
# System Functions
# ================
# Only run the command if the target file (the last arg) is not on an
# NFS filesystem.
function _safe_permission_operation {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local args=( $@ )
local last
local sudo_cmd
local dir_to_check
let last="${#args[*]} - 1"
local dir_to_check=${args[$last]}
if [ ! -d "$dir_to_check" ]; then
dir_to_check=`dirname "$dir_to_check"`
fi
if is_nfs_directory "$dir_to_check" ; then
$xtrace
return 0
fi
if [[ $TRACK_DEPENDS = True ]]; then
sudo_cmd="env"
else
sudo_cmd="sudo"
fi
$xtrace
$sudo_cmd $@
}
# Exit 0 if address is in network or 1 if address is not in network
# ip-range is in CIDR notation: 1.2.3.4/20
# address_in_net ip-address ip-range
function address_in_net {
local ip=$1
local range=$2
local masklen=${range#*/}
local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
local subnet=$(maskip $ip $(cidr2netmask $masklen))
[[ $network == $subnet ]]
}
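# Example: address_in_net 10.0.3.7 10.0.0.0/20 succeeds (exit 0), because
# 10.0.3.7 masked with 255.255.240.0 yields the network address 10.0.0.0.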
# Add a user to a group.
# add_user_to_group user group
function add_user_to_group {
local user=$1
local group=$2
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
# SLE11 and openSUSE 12.2 don't have the usual usermod
if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then
sudo usermod -a -G "$group" "$user"
else
sudo usermod -A "$group" "$user"
fi
}
# Convert CIDR notation to an IPv4 netmask
# cidr2netmask cidr-bits
function cidr2netmask {
local maskpat="255 255 255 255"
local maskdgt="254 252 248 240 224 192 128"
set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3}
echo ${1-0}.${2-0}.${3-0}.${4-0}
}
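# Example: cidr2netmask 20 prints "255.255.240.0" -- two full octets from
# maskpat plus the "240" entry selected from maskdgt for the remaining 4 bits.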
# Gracefully cp only if source file/dir exists
# cp_it source destination
function cp_it {
if [ -e $1 ] || [ -d $1 ]; then
cp -pRL $1 $2
fi
}
# HTTP and HTTPS proxy servers are supported via the usual environment variables [1]
# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in
# ``localrc`` or on the command line if necessary::
#
# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html
#
# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
# Unused function
#function export_proxy_variables {
# if isset http_proxy ; then
# export http_proxy=$http_proxy
# fi
# if isset https_proxy ; then
# export https_proxy=$https_proxy
# fi
# if isset no_proxy ; then
# export no_proxy=$no_proxy
# fi
#}
# Returns true if the directory is on a filesystem mounted via NFS.
function is_nfs_directory {
local mount_type=`stat -f -L -c %T $1`
test "$mount_type" == "nfs"
}
# Return the network portion of the given IP address using netmask
# netmask is in the traditional dotted-quad format
# maskip ip-address netmask
function maskip {
local ip=$1
local mask=$2
local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
echo $subnet
}
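# Example: maskip 192.168.1.77 255.255.255.0 prints "192.168.1.0", the
# network portion of the address under that netmask.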
# Service wrapper to restart services
# restart_service service-name
function restart_service {
if is_ubuntu; then
sudo /usr/sbin/service $1 restart
else
sudo /sbin/service $1 restart
fi
}
# Only change permissions of a file or directory if it is not on an
# NFS filesystem.
function safe_chmod {
_safe_permission_operation chmod $@
}
# Only change ownership of a file or directory if it is not on an NFS
# filesystem.
function safe_chown {
_safe_permission_operation chown $@
}
# Service wrapper to start services
# start_service service-name
function start_service {
if is_ubuntu; then
sudo /usr/sbin/service $1 start
else
sudo /sbin/service $1 start
fi
}
# Service wrapper to stop services
# stop_service service-name
function stop_service {
if is_ubuntu; then
sudo /usr/sbin/service $1 stop
else
sudo /sbin/service $1 stop
fi
}
# Restore xtrace
$XTRACE
# Local variables:
# mode: shell-script
# End:
if [[ $EUID -gt 0 ]]; then
echo "####################################################"
echo "# #"
echo "# ERROR: You must be root to run this script!!!! #"
echo "# #"
echo "####################################################"
exit 1
fi
export PATH=$PATH:/usr/local/bin:/usr/local/sbin
# XXX a bit brutal, but otherwise sudo cannot work when requiretty is enabled.
if [ -f /etc/sudoers ]; then
sed -i "/requiretty/d" /etc/sudoers
fi
# Include Additional Functions
function download_playbook {
if [ ! -f /etc/opt/slapcache.cfg ]; then
slapcache-conf
fi
DFILE="/tmp/tmpplaybook$(basename $0).$$/"
TFILE="archive.tar.gz"
mkdir -p $DFILE
cd $DFILE
slapcache-download --destination=$TFILE
tar -xzvf $TFILE
rm $TFILE
}
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
# and ``DISTRO``
GetDistro
if [[ ${DISTRO} =~ (-.) ]] && [[ -f /etc/debian_version ]]; then
apt_get install lsb-release
GetDistro
fi
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``export FORCE=yes``
if [[ ! ${DISTRO} =~ (jessie|stretch|buster|bullseye|xenial|bionic|focal|rhel7) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
fi
fi
# Make sure wheezy backports are available.
if [[ $DISTRO == "wheezy" ]]; then
echo "deb http://archive.debian.org/debian wheezy-backports main contrib " > /etc/apt/sources.list.d/wheezy-backports.list
fi
if is_fedora && [[ $DISTRO =~ rhel7|f20|f19 ]]; then
# RHEL requires EPEL for many Ansible dependencies
# NOTE: We always remove and install latest -- some environments
# use snapshot images, and if EPEL version updates they break
# unless we update them to latest version.
if sudo yum repolist enabled epel | grep -q 'epel'; then
uninstall_package epel-release || true
fi
# This trick installs the latest epel-release from a bootstrap
# repo, then removes itself (as epel-release installed the
# "real" repo).
#
# You would think that rather than this, you could use
# $releasever directly in .repo file we create below. However
# RHEL gives a $releasever of "6Server" which breaks the path;
# see https://bugzilla.redhat.com/show_bug.cgi?id=1150759
cat <<'EOF' > $ANSIBLE_PLUGIN_LOCATION/log_parse.py
from __future__ import absolute_import
import os
import time
import json
import ansible
baseModule = object
ANSIBLE_VERSION = 1
if hasattr(ansible, 'plugins') and hasattr(ansible.plugins, 'callback'):
baseModule = ansible.plugins.callback.CallbackBase
ANSIBLE_VERSION = 2
class CallbackModule(baseModule):
"""
logs playbook results, per host, in /var/log/ansible/hosts
"""
log_path = '/var/log/ansible/hosts'
fd_list = {}
def __init__(self):
if ANSIBLE_VERSION > 1:
super(CallbackModule, self).__init__()
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
else:
for filename in os.listdir(self.log_path):
filepath = os.path.join(self.log_path, filename)
if os.path.isfile(filepath):
os.unlink(filepath)
def writeLog(self, host, category, content):
if category not in self.fd_list:
self.fd_list[category] = open(
os.path.join(self.log_path, '%s_%s' % (host, category)), "a"
)
self.fd_list[category].write(content + '\n')
def log(self, host, category, data, ignore_errors=False):
if host == "localhost":
host = "127.0.0.1" # keep compatibility
if type(data) is dict:
if '_ansible_verbose_override' in data:
# avoid logging extraneous data
return
content = json.dumps(data)
if ignore_errors:
category = '%s_IGNORED' % category
self.writeLog(host, category, content)
def _stats(self, stats):
for key in self.fd_list:
self.fd_list[key].close()
def runner_on_failed(self, host, res, ignore_errors=False):
self.log(host, 'FAILED', res, ignore_errors)
def runner_on_ok(self, host, res):
self.log(host, 'OK', res)
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
self.log(host, 'UNREACHABLE', res)
def runner_on_async_failed(self, host, res, jid):
self.log(host, 'ASYNC_FAILED', res)
def playbook_on_import_for_host(self, host, imported_file):
self.log(host, 'IMPORTED', imported_file)
def playbook_on_not_import_for_host(self, host, missing_file):
self.log(host, 'NOTIMPORTED', missing_file)
def playbook_on_stats(self, stats):
self._stats(stats)
EOF
ansible localhost -m pip -a name=setuptools --connection=local
ansible localhost -m pip -a name=wheel --connection=local
ansible localhost -m pip -a name=slapcache --connection=local
ansible localhost -m pip -a name=requests --connection=local
if [ ! -f /usr/share/ansible_plugins/mitogen.zip ]; then
wget -O /usr/share/ansible_plugins/mitogen.zip https://shacache.nxdcdn.com/3a935ff257ddc0ad4e0f23d71681e026f14f309f4bed0a8e2a217da9b294be2c676196703f0dde856ece49d711d0221deae70812f035b24aa5cdd0ca02790e85
unzip /usr/share/ansible_plugins/mitogen.zip -d /usr/share/ansible_plugins/mitogen/
mv /usr/share/ansible_plugins/mitogen/mitogen-*/* /usr/share/ansible_plugins/mitogen/
fi
# Include Additional Functions
if [ ! -f /etc/opt/slapcache.cfg ]; then
slapcache-conf
fi
sed -i "s/key = slapos-global-key/key = slapos-global-official-vifib-key/g" /etc/opt/slapcache.cfg
DFILE="/tmp/tmpplaybook_unstable$(basename $0).$$/"
TFILE="archive.tar.gz"
mkdir -p $DFILE
cd $DFILE
wget -O $TFILE http://10.0.2.100/playbook.tar.gz
tar -xzvf $TFILE
rm $TFILE
clear
echo "Starting Ansible playbook:"
ansible-playbook $TEST_YML_PATH -i hosts --connection=local
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/dummy/ 0000775 0000000 0000000 00000000000 14241130220 0031364 5 ustar 00root root 0000000 0000000 README.md 0000664 0000000 0000000 00000000570 14241130220 0032566 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/dummy # Dummy Software Release
The main purpose of this Software Release is to be used in tests!
As a consequence, any change to it must be tied to corresponding changes in
the tests using this Software Release.
Dummy has been written to be fast, so it embeds only the minimum set of
needed features. Please do not change anything in it without a strong
motivation.
instance.cfg 0000664 0000000 0000000 00000001013 14241130220 0033565 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/dummy [buildout]
parts =
log-writer
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[directory]
recipe = slapos.cookbook:mkdirectory
home = $${buildout:directory}
bin = $${:home}/bin
etc = $${:home}/etc
service = $${:etc}/service
script = $${:etc}/run
var = $${:home}/var
log = $${:var}/log
[log-writer]
recipe = slapos.recipe.template
inline =
#!/bin/sh
echo "Hello : $(date)" >> $${directory:log}/log.log
output = $${directory:script}/log-writer
software.cfg 0000664 0000000 0000000 00000000436 14241130220 0033623 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/dummy [buildout]
extends =
../../../../stack/slapos.cfg
parts =
instance-template
slapos-cookbook
[instance-template]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance.cfg
output = ${buildout:directory}/template.cfg
md5sum = 0385b31519c3489fb8d0919621e02ac7
instance-kvm-resilient-test.cfg.jinja2 0000664 0000000 0000000 00000005653 14241130220 0037370 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite [buildout]
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
parts =
request-resilient-instance
deploy-unit-test
deploy-scalability-test
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc/
var = ${:home}/var/
srv = ${:home}/srv/
bin = ${:home}/bin/
tmp = ${:home}/tmp/
log = ${:var}/log/
services = ${:etc}/service/
scripts = ${:etc}/run/
[deploy-scalability-test]
recipe = slapos.cookbook:wrapper
# XXX: put it in etc/run in case of scalability test so that it runs automatically.
wrapper-path = ${directory:bin}/runKVMResiliencyTestSuite
testnode-parameters = --test-result-path={{ slapparameter_dict.get('test-result-path') }} --revision={{ slapparameter_dict.get('test-suite-revision') }} --node-title={{ slapparameter_dict.get('scalability-launcher-title') }} --test-suite={{ slapparameter_dict.get('test-suite') }} --test-suite-master-url={{ slapparameter_dict.get('test-suite-master-url') }} --log-path=${directory:log}
test-parameters = server_url=${slap-connection:server-url} key_file=${slap-connection:key-file} cert_file=${slap-connection:cert-file} computer_id=${slap-connection:computer-id} partition_id=${slap-connection:partition-id} software=${slap-connection:software-release-url} namebase=kvm root_instance_name='${request-resilient-instance:name}'
command-line = {{ bin_directory }}/runResiliencyScalabilityTestNode ${:testnode-parameters} ${:test-parameters}
[deploy-unit-test]
recipe = collective.recipe.template
test-parameters = server_url=${slap-connection:server-url} key_file=${slap-connection:key-file} cert_file=${slap-connection:cert-file} computer_id=${slap-connection:computer-id} partition_id=${slap-connection:partition-id} software=${slap-connection:software-release-url} namebase=kvm root_instance_name='${request-resilient-instance:name}'
input = inline:
#!/bin/sh
exec {{ bin_directory }}/runResiliencyUnitTestTestNode $@ ${:test-parameters}
output = ${directory:bin}/runTestSuite
mode = 755
[request-resilient-instance]
<= slap-connection
recipe = slapos.cookbook:request
software-url = ${slap-connection:software-release-url}
software-type = kvm-resilient
name = Resilient Instance (Root Instance)
{% for key, value in slapparameter_dict.get('cluster', {}).items() -%}
config-{{ key }} = {{ dumps(value) }}
{% endfor -%}
config-virtual-hard-drive-url = ${slap-parameter:virtual-hard-drive-url}
config-virtual-hard-drive-md5sum = ${slap-parameter:virtual-hard-drive-md5sum}
config-virtual-hard-drive-gzipped = ${slap-parameter:virtual-hard-drive-gzipped}
config-resiliency-backup-periodicity = */5 * * * *
config-resilient-clone-number = 1
config-ignore-known-hosts-file = false
return = ipv6
# XXX What to do?
sla-computer_guid = ${slap-connection:computer-id}
[slap-parameter]
virtual-hard-drive-url = {{ default_test_image_url }}
virtual-hard-drive-md5sum = {{ default_test_image_md5sum }}
virtual-hard-drive-gzipped = true
instance-resilient-test.cfg.jinja2 0000664 0000000 0000000 00000006127 14241130220 0036572 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite [buildout]
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
parts =
request-resilient-instance
deploy-unit-test
deploy-scalability-test
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc/
var = ${:home}/var/
srv = ${:home}/srv/
bin = ${:home}/bin/
tmp = ${:home}/tmp/
log = ${:var}/log/
services = ${:etc}/service/
scripts = ${:etc}/run/
[deploy-scalability-test]
recipe = slapos.cookbook:wrapper
testnode-parameters = --test-result-path={{ slapparameter_dict.get('test-result-path') }} --revision={{ slapparameter_dict.get('test-suite-revision') }} --node-title={{ slapparameter_dict.get('scalability-launcher-title') }} --test-suite={{ slapparameter_dict.get('test-suite') }} --test-suite-master-url={{ slapparameter_dict.get('test-suite-master-url') }} --log-path=${directory:log}
test-parameters = server_url=${slap-connection:server-url} key_file=${slap-connection:key-file} cert_file=${slap-connection:cert-file} computer_id=${slap-connection:computer-id} partition_id=${slap-connection:partition-id} software=${slap-connection:software-release-url} namebase=runner root_instance_name='${request-resilient-instance:name}'
command-line = {{ bin_directory }}/runResiliencyScalabilityTestNode ${:testnode-parameters} ${:test-parameters}
wrapper-path = ${directory:scripts}/runResiliencyTestSuite
[deploy-unit-test]
recipe = collective.recipe.template
test-parameters = server_url=${slap-connection:server-url} key_file=${slap-connection:key-file} cert_file=${slap-connection:cert-file} computer_id=${slap-connection:computer-id} partition_id=${slap-connection:partition-id} software=${slap-connection:software-release-url} namebase=runner root_instance_name='${request-resilient-instance:name}'
input = inline:
#!/bin/sh
exec {{ bin_directory }}/runResiliencyUnitTestTestNode $@ ${:test-parameters}
output = ${directory:bin}/runTestSuite
mode = 755
[request-resilient-instance]
<= slap-connection
recipe = slapos.cookbook:request
software-url = ${slap-connection:software-release-url}
software-type = resilient
name = Resilient Instance (Root Instance)
{% for key, value in slapparameter_dict.get('cluster', {}).items() -%}
config-{{ key }} = {{ dumps(value) }}
{% endfor -%}
config-resiliency-backup-periodicity = */10 * * * *
config-resilient-clone-number = 1
config-ignore-known-hosts-file = false
config-cpu-usage-ratio = 1
# Use one external folder (the same as erp5testnode's) to build the software.
# This is a way to preserve the latest software release build, speeding up
# the tests.
config-software-root = ${buildout:directory}/../../soft
config-buildout-shared-folder = ${buildout:directory}/../../shared
config-no-ipv4-frontend = true
# Use the same repository / branch for the software installed in slaprunner
# as for the tested slaprunner itself.
config-slapos-repository = {{ slapos_repository_url }}
config-slapos-reference = {{ slapos_repository_branch }}
# XXX Hack to deploy Root Instance on the same computer as the type-test Instance
sla-computer_guid = ${slap-connection:computer-id}
return = backend-url
[slap-parameter]
instance.cfg.in 0000664 0000000 0000000 00000001330 14241130220 0033041 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite [buildout]
extends = ${template:output}
[switch_softwaretype]
default = $${:test}
RootSoftwareInstance = $${:test}
# Used for the test of resiliency. The system wants a "test" software_type.
test = dynamic-template-resilient-test:output
[dynamic-template-resilient-test]
recipe = slapos.recipe.template:jinja2
url = ${template-resilient-test:target}
output = $${buildout:directory}/template-resilient-test.cfg
bin-directory = ${buildout:bin-directory}
context =
key develop_eggs_directory buildout:develop-eggs-directory
key eggs_directory buildout:eggs-directory
key slapparameter_dict slap-configuration:configuration
raw bin_directory ${buildout:bin-directory}
${template-resilient-test:extra-context}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite/kvm.cfg 0000664 0000000 0000000 00000001212 14241130220 0031503 0 ustar 00root root 0000000 0000000 [buildout]
extends =
../../kvm/development.cfg
testsuite.cfg
parts += template-erp5testnode
[default-test-image]
recipe = slapos.recipe.build:download
url = http://www.nexedi.org/static/slapos/kvm_resiliency_test/virtual.qcow.gz
md5sum = dd82c771f6f7738fb4b0fc1330ed8236
[template-resilient-test]
filename = instance-kvm-resilient-test.cfg.jinja2
md5sum = 5f255502973530181ee45d93fa00a3c4
# Ingest extra-context on the final template-resilient-test rendering;
# always ingest raw values.
extra-context =
raw default_test_image_url file://${default-test-image:target}
raw default_test_image_md5sum ${default-test-image:md5sum}
slaprunner.cfg 0000664 0000000 0000000 00000001513 14241130220 0033024 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite [buildout]
extends =
../../slaprunner/development.cfg
testsuite.cfg
parts += template-erp5testnode
[template-resilient-test]
filename = instance-resilient-test.cfg.jinja2
md5sum = e4b04aa6fd3413bc6ae38823abdcc8fa
# We have to use an extra level of indentation here because this is substituted
# during software buildout to generate instance buildout, but the
# slapos.recipe.template recipe doing the substitution does string replacements
# without knowledge of the buildout syntax, so we want the second line to be
# indented in the final generated instance buildout.
extra-context =
raw slapos_repository_url ${slapos.cookbook-repository:repository}
raw slapos_repository_branch ${slapos.cookbook-repository:branch}
[exporter-default-configuration]
# Define shorter interaction to speed up tests
backup_wait_time = 1
testsuite.cfg 0000664 0000000 0000000 00000000557 14241130220 0032673 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/erp5testnode/testsuite [buildout]
extends = buildout.hash.cfg
[template-erp5testnode]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance.cfg.in
output = ${buildout:directory}/template.cfg
[template]
output = ${buildout:directory}/template-original.cfg
[template-resilient-test]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:filename}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/ 0000775 0000000 0000000 00000000000 14241130220 0025220 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/buildout.hash.cfg 0000664 0000000 0000000 00000001566 14241130220 0030462 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposedly using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance-profile]
filename = instance.cfg
md5sum = 41f5acc071609a0c4b5ada295ede6bac
[template-fluentd]
filename = instance-fluentd.cfg
md5sum = 35f9d95f6a75e28bfeafc3568ca16f05
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/instance-fluentd.cfg 0000664 0000000 0000000 00000001407 14241130220 0031146 0 ustar 00root root 0000000 0000000 [service-fluentd]
recipe = slapos.cookbook:wrapper
wrapper-path = {{ directory['service'] }}/fluentd-service
command-line = ${fluentd:location}/bin/fluentd -v -c {{ fluentd_agent_conf }}
environment =
GEM_PATH=${fluentd:location}/lib/ruby/gems/
{% set part_list = [] -%}
{% for port in port_list -%}
{% set promise_section_title = 'fluentd-port-' ~ port ~ '-listening' -%}
{% do part_list.append(promise_section_title) -%}
[{{ promise_section_title }}]
<= monitor-promise-base
promise = check_socket_listening
name = {{ promise_section_title }}.py
config-host = $${slap-configuration:ipv6-random}
config-port = {{ port }}
{% endfor %}
[buildout]
parts =
service-fluentd
{%- for part in part_list %}
{{ part }}
{%- endfor %}
extends = ${monitor-template:output}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/instance-input-schema.json0000664 0000000 0000000 00000000571 14241130220 0032315 0 ustar 00root root 0000000 0000000 {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"description": "Parameters to instantiate Fluentd",
"additionalProperties": false,
"properties": {
"conf_text": {
"description": "Fluentd configuration. You can write it entirely by yourself. See fluentd-agent.conf.jinja2.in",
"default": "",
"type": "string"
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/instance.cfg 0000664 0000000 0000000 00000002702 14241130220 0027506 0 ustar 00root root 0000000 0000000 [buildout]
parts =
switch-softwaretype
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = dynamic-template-fluentd:output
RootSoftwareInstance = $${:default}
[directory]
recipe = slapos.cookbook:mkdirectory
home = $${buildout:directory}
etc = $${:home}/etc
var = $${:home}/var
service = $${:etc}/service
bin = $${:home}/bin
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = $${slap_connection:computer_id}
partition = $${slap_connection:partition_id}
url = $${slap_connection:server_url}
key = $${slap_connection:key_file}
cert = $${slap_connection:cert_file}
[dynamic-template-fluentd]
recipe = slapos.recipe.template:jinja2
url = ${template-fluentd:output}
output = $${buildout:directory}/instance-fluentd.cfg
extensions = jinja2.ext.do
context =
key fluentd_agent_conf fluentd-agent-conf:output
key port_list fluentd-conf:port-list
section directory directory
[fluentd-conf]
recipe = slapos.recipe.build
slapparameter-dict = $${slap-configuration:configuration}
init =
import re
options['text'] = options['slapparameter-dict'].get('conf_text') or ''
options['port-list'] = re.findall(r'.*port (\d+).*<\/source>', options['text'], re.DOTALL)
[fluentd-agent-conf]
recipe = slapos.recipe.template
inline = $${fluentd-conf:text}
output = $${directory:etc}/fluentd-agent.conf
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/software-py2.cfg 0000664 0000000 0000000 00000000077 14241130220 0030247 0 ustar 00root root 0000000 0000000 [buildout]
extends =
software.cfg
[python]
part = python2.7
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/software.cfg 0000664 0000000 0000000 00000001051 14241130220 0027530 0 ustar 00root root 0000000 0000000 [buildout]
extends =
buildout.hash.cfg
../../component/fluentd/buildout.cfg
../../stack/slapos.cfg
../../stack/monitor/buildout.cfg
parts =
instance-profile
slapos-cookbook
[python]
part = python3
[template-base]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
[instance-profile]
< = template-base
output = ${buildout:directory}/template.cfg
[template-fluentd]
< = template-base
output = ${buildout:directory}/template-fluentd.cfg
[fluentd]
gems +=
fluent-plugin-wendelin==0.5
fluent-plugin-bin==0.3
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/test/ 0000775 0000000 0000000 00000000000 14241130220 0026177 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/test/README.md 0000664 0000000 0000000 00000000043 14241130220 0027453 0 ustar 00root root 0000000 0000000 Tests for fluentd Software Release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/test/setup.py 0000664 0000000 0000000 00000003763 14241130220 0027722 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.fluentd'
with open("README.md") as f:
long_description = f.read()
setup(name=name,
version=version,
description="Test for SlapOS' fluentd",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'msgpack',
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'supervisor',
'six',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/fluentd/test/test.py 0000664 0000000 0000000 00000020472 14241130220 0027535 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import msgpack
import os
import random
import shutil
import socket
import struct
import subprocess
import tempfile
import time
import six
import sys
from six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler
from six.moves.socketserver import StreamRequestHandler, TCPServer
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
from slapos.testing.utils import findFreeTCPPort
FLUENTD_PORT = 24224
FLUSH_INTERVAL = 1
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..',
'software%s.cfg' % ("-py2" if six.PY2 else ""))))
class FluentdTestCase(SlapOSInstanceTestCase):
__partition_reference__ = 'fluentd'
def test_process(self):
expected_process_name_list = [
'fluentd-service-on-watch',
]
with self.slap.instance_supervisor_rpc as supervisor:
process_names = [process['name']
for process in supervisor.getAllProcessInfo()]
for expected_process_name in expected_process_name_list:
self.assertIn(expected_process_name, process_names)
class OneRequestServer(TCPServer):
address_family = socket.AF_INET6
timeout = 1
def get_first_data(self, flush_interval=1):
start = time.time()
while(not self.RequestHandlerClass.received_data
and time.time() - start < 10*flush_interval):
self.handle_request()
return self.RequestHandlerClass.received_data
class WendelinTutorialTestCase(FluentdTestCase):
@classmethod
def get_configuration(cls):
return ''
@classmethod
def getInstanceParameterDict(cls):
return {'conf_text': cls._conf,}
@classmethod
def measureDict(cls):
return {k: v.encode() for k, v in
zip((b'pressure', b'humidity', b'temperature'), cls._measurementList)}
@classmethod
def setUpClass(cls):
cls._tmp_dir = tempfile.mkdtemp()
cls._measurementList = cls.sensor_value_list()
cls._conf = cls.get_configuration()
super(FluentdTestCase, cls).setUpClass()
fluentd_dir = os.path.join(cls.computer_partition_root_path,
'software_release', 'parts', 'fluentd')
cls._fluentd_bin = os.path.join(fluentd_dir, 'bin', 'fluentd')
cls._gem_path = os.path.join(fluentd_dir, 'lib', 'ruby', 'gems')
@classmethod
def sensor_value_list(cls):
return [str(value) for value in (round(random.uniform(870, 1084), 2),
round(random.uniform(0, 100), 2),
round(random.uniform(-20, 50), 3))]
def serve(self, port, request_handler_class):
server_address = (self._ipv6_address, port)
server = OneRequestServer(server_address, request_handler_class)
data = server.get_first_data(FLUSH_INTERVAL)
server.server_close()
return data
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls._tmp_dir)
super(FluentdTestCase, cls).tearDownClass()
def read_fluentd_conf(self, configuration):
conf_path = os.path.join(self._tmp_dir, 'fluentd.conf')
with open(conf_path, "w") as conf_file:
conf_file.write(configuration)
return subprocess.check_output(
[self._fluentd_bin, '-c', conf_path, '--dry-run'],
env={'GEM_PATH': self._gem_path},
universal_newlines=True,
)
def _test_configuration(self, expected_str):
self.assertRegexpMatches(
self.read_fluentd_conf(self._conf),
expected_str,
)
class FluentdHTTPRequestHandler(StreamRequestHandler):
received_data = b''
def handle(self):
data = self.rfile.readline().strip()
# ignore heartbeats (https://docs.fluentd.org/output/forward#heartbeat_type)
if len(data) > 0:
FluentdHTTPRequestHandler.received_data = data
# see https://wendelin.nexedi.com/wendelin-Learning.Track/wendelin-Tutorial.Setup.Fluentd.on.Sensor
class SensorConfTestCase(WendelinTutorialTestCase):
@classmethod
def get_configuration(cls):
script_path = os.path.join(cls._tmp_dir, "custom_read_bme280.py")
with open(script_path, "w") as script:
script.write(cls.sensor_script(cls._measurementList))
return cls.sensor_conf(script_path)
@classmethod
def sensor_conf(cls, script_path):
return '''\
<source>
  @type exec
  tag tag.name
  command %s %s
  run_interval %ss
  keys pressure, humidity, temperature
</source>
<match tag.name>
  @type forward
  <server>
    name myserver1
    host %s
  </server>
  flush_mode immediate
</match>
''' % (sys.executable, script_path, FLUSH_INTERVAL, cls._ipv6_address)
@classmethod
def sensor_script(cls, measurementList):
return '''\
#!/usr/bin/python
# -*- coding: utf-8 -*-
print("%s")''' % "\t".join(measurementList)
def test_configuration(self):
self._test_configuration(
r'adding forwarding server \'myserver1\' host="%s" port=%s weight=60'
% (self._ipv6_address, FLUENTD_PORT)
)
def test_send_data(self):
tag, data, header = msgpack.unpackb(
self.serve(FLUENTD_PORT, FluentdHTTPRequestHandler),
raw=True,
)
self.assertEqual(b'tag.name', tag)
self.assertEqual(self.measureDict(), msgpack.unpackb(data)[-1])
self.assertEqual({b'compressed': b'text', b'size': 1}, header)
class WendelinHTTPRequestHandler(SimpleHTTPRequestHandler):
received_data = b''
def do_POST(self):
WendelinHTTPRequestHandler.received_data = self.rfile.read(
int(self.headers['Content-Length']))
self.send_response(200)
self.end_headers()
# see https://wendelin.nexedi.com/wendelin-Learning.Track/wendelin-Tutorial.Setup.Fluentd.on.IOTGateway
class GatewayConfTestCase(WendelinTutorialTestCase):
@classmethod
def gateway_conf(cls, fluentd_port, wendelin_port):
return '''\
<source>
  @type forward
  port %s
  bind %s
</source>
<match tag.name>
  @type wendelin
  streamtool_uri http://[%s]:%s/erp5/portal_ingestion_policies/default
  user foo
  password bar
  <buffer>
    flush_mode interval
    @type file
    path fluentd-buffer-file/
    flush_interval %ss
  </buffer>
</match>
''' % (fluentd_port, cls._ipv6_address, cls._ipv6_address,
wendelin_port, FLUSH_INTERVAL)
@classmethod
def get_configuration(cls):
fluentd_port = findFreeTCPPort(cls._ipv6_address)
cls._fluentd_port = fluentd_port
wendelin_port = findFreeTCPPort(cls._ipv6_address)
cls._wendelin_port = wendelin_port
return cls.gateway_conf(fluentd_port, wendelin_port)
def test_configuration_file(self):
self._test_configuration('starting fluentd')
def test_wendelin_data_forwarding(self):
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.connect((self._ipv6_address, self._fluentd_port))
data = [
msgpack.ExtType(0, struct.pack('!Q', int(time.time()) << 32)),
self.measureDict(),
]
sock.sendall(
msgpack.packb([
b'tag.name',
msgpack.packb(data),
{b'size': 1, b'compressed': b'text'},
], use_bin_type=False),
)
sock.close()
self.assertEqual(
data,
msgpack.unpackb(
self.serve(self._wendelin_port, WendelinHTTPRequestHandler)),
)
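# A minimal sketch (the helper name is ours): Fluentd's EventTime is msgpack
# ExtType 0 carrying 8 bytes - 4 bytes of seconds followed by 4 bytes of
# nanoseconds. Packing `seconds << 32` as '!Q', as in
# test_wendelin_data_forwarding above, therefore encodes zero nanoseconds.
def example_event_time(seconds):
  return msgpack.ExtType(0, struct.pack('!Q', seconds << 32))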
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/ 0000775 0000000 0000000 00000000000 14241130220 0025012 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/buildout.hash.cfg 0000664 0000000 0000000 00000001430 14241130220 0030242 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance-cfg]
filename = instance.cfg.in
md5sum = 38c79a4952a7cb63698135f1d1ed6c8c
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/instance.cfg.in 0000664 0000000 0000000 00000005123 14241130220 0027705 0 ustar 00root root 0000000 0000000 [buildout]
parts =
publish-connection-parameter
stat-password
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[publish-connection-parameter]
recipe = slapos.cookbook:publish
url = https://[$${galene-wrapper:ip}]:$${galene-wrapper:port}
admin-user = $${admin-password:username}
admin-password = $${admin-password:passwd}
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
configuration.ice-servers.json = [{"urls":["stun:turn-paris-rapidspace.api.nexedi.net:443"]}]
[directory]
recipe = slapos.cookbook:mkdirectory
etc = $${buildout:directory}/etc
var = $${buildout:directory}/var
srv = $${buildout:directory}/srv
bin = $${buildout:directory}/bin
tmp = $${buildout:directory}/tmp
run = $${:var}/run
services = $${:etc}/service
data = $${:srv}/data
groups = $${:srv}/groups
recordings = $${:srv}/recordings
[galene-ssl]
recipe = plone.recipe.command
cert-file = $${directory:data}/cert.pem
key-file = $${directory:data}/key.pem
command = ${openssl:location}/bin/openssl req -newkey rsa:2048 -batch -new -x509 -days 3650 -nodes -keyout "$${:key-file}" -out "$${:cert-file}"
update-command =
stop-on-error = true
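# NOTE (illustrative only) the generated self-signed certificate can be
# inspected manually, e.g. `openssl x509 -in cert.pem -noout -subject -dates`.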
[admin-password]
recipe = slapos.cookbook:generate.password
storage-path = $${directory:data}/.passwd
username = admin
[stat-password]
recipe = slapos.recipe.template
inline =
$${admin-password:username}:$${admin-password:passwd}
output = $${directory:data}/passwd
[ice-servers.json]
recipe = slapos.recipe.template
inline =
$${slap-configuration:configuration.ice-servers.json}
output = $${directory:data}/ice-servers.json
[groups-json]
recipe = slapos.recipe.template
inline =
{
"public":true,
"op": [{"username":"$${admin-password:username}","password":"$${admin-password:passwd}"}],
"other": [],
"presenter": [{"username": "", "password": "nexedi"}]
}
output = $${directory:groups}/public.json
[galene-wrapper]
recipe = slapos.recipe.template
port = 8443
ip = $${slap-configuration:ipv6-random}
inline =
#!/bin/sh
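  # raise the soft file-descriptor limit to the hard limit
  # (a conferencing server may need many simultaneous sockets)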
ulimit -n $(ulimit -Hn)
exec ${gowork:bin}/galene \
-static ${galene:location}/static \
-recordings $${directory:recordings} \
-groups $${directory:groups} \
-data $${directory:data} \
-http [$${:ip}]:$${:port} \
-turn ""
output = $${directory:services}/galene
depends =
$${ice-servers.json:recipe}
$${groups-json:recipe}
$${galene-ssl:recipe}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/software.cfg 0000664 0000000 0000000 00000001402 14241130220 0027322 0 ustar 00root root 0000000 0000000 [buildout]
extends =
../../component/golang/buildout.cfg
../../component/openssl/buildout.cfg
../../stack/slapos.cfg
buildout.hash.cfg
parts =
slapos-cookbook
eggs
instance-cfg
[python]
part = python3
# eggs for instance.cfg
[eggs]
recipe = zc.recipe.egg
eggs =
plone.recipe.command
collective.recipe.template
[galene]
<= go-git-package
go.importpath = lab.nexedi.com/nexedi/galene
repository = https://lab.nexedi.com/nexedi/galene.git
revision = 6669a93ae39ad83b8b3a222dd8210dfef8a7ed02
[gowork]
install =
${galene:location}:./...
environment =
CGO_ENABLED = 0
buildflags = -ldflags='-s -w'
[instance-cfg]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/instance.cfg
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/test/ 0000775 0000000 0000000 00000000000 14241130220 0025771 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/test/README.md 0000664 0000000 0000000 00000000042 14241130220 0027244 0 ustar 00root root 0000000 0000000 Tests for Galene software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/test/setup.py 0000664 0000000 0000000 00000003604 14241130220 0027506 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.galene'
long_description = open("README.md").read()
setup(
name=name,
version=version,
description="Test for SlapOS' Galene",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'requests',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/galene/test/test.py 0000664 0000000 0000000 00000003641 14241130220 0027326 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import unicode_literals
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '../software.cfg')))
class TestGalene(SlapOSInstanceTestCase):
__partition_reference__ = 'G'
def setUp(self):
self.connection_parameters = self.computer_partition.getConnectionParameterDict()
def test_url_get(self):
resp = requests.get(self.connection_parameters['url'], verify=False)
self.assertEqual(requests.codes.ok, resp.status_code)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/ 0000775 0000000 0000000 00000000000 14241130220 0025021 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/buildout.hash.cfg 0000664 0000000 0000000 00000006055 14241130220 0030261 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance.cfg]
filename = instance.cfg.in
md5sum = 8e8edd7dadb9c331fdba826312e7e8d2
[watcher]
_update_hash_filename_ = watcher.in
md5sum = 90690e1351637f20ff2df57a6c3e85b4
[gitlab-export]
_update_hash_filename_ = gitlab-export.in
md5sum = a7b32680e80f34276f0a32a5e22dad50
[database.yml.in]
_update_hash_filename_ = template/database.yml.in
md5sum = 61d1d04b9347b3168a1ad7676e4681ef
[gitconfig.in]
_update_hash_filename_ = template/gitconfig.in
md5sum = eb1230fee50067924ba89f4dc6e82fa9
[gitlab-parameters.cfg]
_update_hash_filename_ = gitlab-parameters.cfg
md5sum = c2e23c0f7baa1633df0436ca4e728424
[gitlab-shell-config.yml.in]
_update_hash_filename_ = template/gitlab-shell-config.yml.in
md5sum = 52d18b521b8cd16352fc88b1e1d79d53
[gitlab-unicorn-startup.in]
_update_hash_filename_ = gitlab-unicorn-startup.in
md5sum = b0c3d465a8aaad9d2274934dcf208645
[gitlab.yml.in]
_update_hash_filename_ = template/gitlab.yml.in
md5sum = f4cc0bc898b8d59010d61473e2adc53b
[gitaly-config.toml.in]
_update_hash_filename_ = template/gitaly-config.toml.in
md5sum = 0f1ec4077dab586cc003ae13f689eda2
[instance-gitlab.cfg.in]
_update_hash_filename_ = instance-gitlab.cfg.in
md5sum = 0b023c7efd027f65b14e752484be2ec7
[instance-gitlab-export.cfg.in]
_update_hash_filename_ = instance-gitlab-export.cfg.in
md5sum = 9ed8220bb3ad71ff7e8638354127412c
[instance-gitlab-test.cfg.in]
_update_hash_filename_ = instance-gitlab-test.cfg.in
md5sum = 7ba08928e6a8998ec8ed1bb97851b726
[macrolib.cfg.in]
_update_hash_filename_ = macrolib.cfg.in
md5sum = a56a44e96f65f5ed20211bb6a54279f4
[nginx-gitlab-http.conf.in]
_update_hash_filename_ = template/nginx-gitlab-http.conf.in
md5sum = cd7471a8c5d6f6bc848c62ce62dca966
[nginx.conf.in]
_update_hash_filename_ = template/nginx.conf.in
md5sum = 8c904510eb39dc212204f68f2b81b068
[rack_attack.rb.in]
_update_hash_filename_ = template/rack_attack.rb.in
md5sum = 7d0e6dc6b826f6df6b20d8574a29e2f8
[resque.yml.in]
_update_hash_filename_ = template/resque.yml.in
md5sum = 7c89a730889e3224548d9abe51a2d719
[smtp_settings.rb.in]
_update_hash_filename_ = template/smtp_settings.rb.in
md5sum = 4e1ced687a86e4cfff2dde91237e3942
[template-gitlab-resiliency-restore.sh.in]
_update_hash_filename_ = template/template-gitlab-resiliency-restore.sh.in
md5sum = 16b9f52f00d55feab7e31a88029ad351
[unicorn.rb.in]
_update_hash_filename_ = template/unicorn.rb.in
md5sum = 67728235a2c4c9425c80f0c856749885
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/gitlab-export.in 0000664 0000000 0000000 00000001204 14241130220 0030127 0 ustar 00root root 0000000 0000000 #!{{ bash.location }}/bin/bash -e
# export gitlab site via `gitlab-backup pull` to a "for-export" git-backup repository
# gitlab-export <export-repository>
#
# NOTE gitlab-backup, gitlab-rails, ... all have to be on $PATH.
# which site to export is determined by which gitlab-rails is on $PATH.
if [ "$#" -ne 1 ]; then
echo "Usage: gitlab-export " 1>&2
exit 1
fi
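# Hypothetical example invocation (the path is illustrative only):
#   gitlab-export /srv/backup/backup-gitlab.git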
exportto_repo="$1"
# create / setup export repository if it does not exist yet
mkdir -p "$exportto_repo"
cd "$exportto_repo"
# verify we are in a git repository
if ! git rev-parse --is-inside-git-dir ; then
git init --bare
fi
exec gitlab-backup pull
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/gitlab-parameters.cfg 0000664 0000000 0000000 00000011535 14241130220 0031112 0 ustar 00root root 0000000 0000000 # Upstream parameters for a GitLab instance
#
# Selected parameters - main ones - names and advanced defaults taken from omnibus-gitlab
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-config-template/gitlab.rb.template
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/attributes/default.rb
#
# TODO better autogenerate from ^^^ (?)
#
# (last updated for omnibus-gitlab 8.8.9+ce.0-g25376053)
[gitlab-parameters]
configuration.external_url = http://lab.example.com
# db advanced
configuration.db_pool = 10
# rack-attack
configuration.rate_limit_requests_per_period = 10
configuration.rate_limit_period = 60
configuration.time_zone = UTC
configuration.email_enabled = true
configuration.email_from = lab@example.com
configuration.email_display_name = GitLab
configuration.email_reply_to = noreply@example.com
configuration.smtp_enable = true
configuration.smtp_address = smtp.server
configuration.smtp_port = 465
configuration.smtp_user_name = smtp user
configuration.smtp_password = smtp password
configuration.smtp_domain = lab.example.com
configuration.smtp_authentication = login
configuration.smtp_enable_starttls_auto = true
# none | peer | client_once | fail_if_no_peer_cert -> see gitlab-omnibus links at top
configuration.smtp_openssl_verify_mode = peer
configuration.default_can_create_group = true
configuration.username_changing_enabled = true
configuration.default_theme = 2
configuration.default_projects_features.issues = true
configuration.default_projects_features.merge_requests = true
configuration.default_projects_features.wiki = true
configuration.default_projects_features.snippets = true
configuration.default_projects_features.builds = true
configuration.webhook_timeout = 10
# 0 means forever (seconds)
configuration.backup_keep_time = 0
# NOTE empty = default gitlab limits
configuration.git_max_size =
configuration.git_timeout =
# sidekiq
configuration.sidekiq_shutdown_timeout = 4
configuration.sidekiq_concurrency = 25
configuration.sidekiq_memory_killer_max_rss = 1000000
# unicorn
configuration.unicorn_worker_timeout = 60
configuration.unicorn_worker_processes = 2
# unicorn advanced
configuration.unicorn_backlog_socket = 1024
configuration.unicorn_worker_memory_limit_min = 300*(1024**2)
configuration.unicorn_worker_memory_limit_max = 350*(1024**2)
# nginx
configuration.nginx_client_max_body_size = 0
# NOTE: we don't really need old ciphers - usually we talk directly to frontend only
configuration.nginx_ssl_ciphers = ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4
configuration.nginx_ssl_prefer_server_ciphers = on
configuration.nginx_ssl_protocols = TLSv1 TLSv1.1 TLSv1.2
# the following is gitlab-omnibus default but not nginx's default
configuration.nginx_ssl_session_cache = builtin:1000 shared:SSL:10m
configuration.nginx_ssl_session_timeout = 5m
configuration.nginx_proxy_read_timeout = 300
configuration.nginx_proxy_connect_timeout = 300
# nginx advanced
configuration.nginx_worker_processes = 4
configuration.nginx_worker_connections = 10240
configuration.nginx_log_format = $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"
configuration.nginx_sendfile = on
configuration.nginx_tcp_nopush = on
configuration.nginx_tcp_nodelay = on
configuration.nginx_gzip = on
configuration.nginx_gzip_http_version = 1.0
configuration.nginx_gzip_comp_level = 2
configuration.nginx_gzip_proxied = any
configuration.nginx_gzip_types = text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript application/json
configuration.nginx_keepalive_timeout = 65
configuration.nginx_header_allow_origin = $http_origin
configuration.nginx_hsts_max_age = 31536000
configuration.nginx_hsts_include_subdomains = false
configuration.nginx_gzip_enabled = true
# configuring trusted proxies
# GitLab is behind a reverse proxy, so we don't want the IP address of the proxy
# to show up as the client address (because rack attack blacklists the lab
# frontend)
configuration.nginx_real_ip_trusted_addresses =
configuration.nginx_real_ip_header = X-Forwarded-For
configuration.nginx_real_ip_recursive = off
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/gitlab-unicorn-startup.in 0000664 0000000 0000000 00000004765 14241130220 0032002 0 ustar 00root root 0000000 0000000 #!{{ bash_bin }}
# start up gitlab's unicorn with first making sure db is properly setup and all
# migrations are up as pre-condition.
RAKE={{ gitlab_rake }}
die() {
echo "$*" 1>&2
exit 1
}
# run psql on gitlab db
psql() {
{{ psql_bin }} \
-h {{ pgsql['pgdata-directory'] }} \
-U {{ pgsql.superuser }} \
-d {{ pgsql.dbname }} \
"$@"
}
# 1. what to do when instance is initially setup
# see
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/recipes/database_migrations.rb
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/recipes/postgresql.rb
# initial db setup
# (first querying PG several times, waiting a bit until PostgreSQL is started and ready)
tpgwait=5
while true; do
pgtables="$(psql -c '\d' 2>&1)" && break
tpgwait=$(( $tpgwait - 1 ))
test $tpgwait = 0 && die "pg query problem"
echo "I: PostgreSQL is not ready (yet ?); will retry $tpgwait times..." 1>&2
sleep 1
done
echo "I: PostgreSQL ready." 1>&2
# make sure pg_trgm extension is enabled for gitlab db
psql -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm;' || die "pg_trgm setup failed"
if echo "$pgtables" | grep -q '^Did not find any relations' ; then
$RAKE db:schema:load db:seed_fu || die "initial db setup failed"
fi
# re-build ssh keys
# (we do not use them - just for cleanliness)
force=yes $RAKE gitlab:shell:setup || die "gitlab:shell:setup failed"
# 2. what to do when instance is upgraded
# see
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/support/deploy/deploy.sh
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/gitlab/upgrader.rb
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/recipes/gitlab-rails.rb
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-ctl-commands/upgrade.rb
#
# Assets compilation is handled at instance deployment time. We do everything else here.
# make sure all migrations are up
migrate_log="{{ log_dir }}/db-migrate-`date +%s`.log"
$RAKE db:migrate >$migrate_log 2>&1 || die "db:migrate failed"
# if it was a no-op "migration" we don't need info about that - keep logs
# only for actual migration runs.
test -s $migrate_log || rm $migrate_log
# clear cache
$RAKE cache:clear || die "cache:clear failed"
# 3. finally exec to unicorn
exec {{ gitlab_unicorn }} \
-E production \
-c {{ unicorn_rb.output }} \
{{ gitlab_work.location }}/config.ru
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/gowork.cfg 0000664 0000000 0000000 00000002055 14241130220 0027014 0 ustar 00root root 0000000 0000000 # Code generated by gowork-snapshot; DO NOT EDIT.
# list of go git repositories to fetch
[gowork.goinstall]
depends_gitfetch =
${go_github.com_pkg_errors:recipe}
${go_lab.nexedi.com_kirr_git-backup:recipe}
${go_lab.nexedi.com_kirr_go123:recipe}
[go_github.com_libgit2_git2go]
<= go-git-package
go.importpath = github.com/libgit2/git2go
repository = https://github.com/libgit2/git2go.git
# branch 'next' is required by git-backup
revision = next-g5d0a4c752a74258a5f42e40fccd2908ac4e336b8
[go_github.com_pkg_errors]
<= go-git-package
go.importpath = github.com/pkg/errors
repository = https://github.com/pkg/errors.git
revision = v0.8.0-12-g816c908556
[go_lab.nexedi.com_kirr_git-backup]
<= go-git-package
go.importpath = lab.nexedi.com/kirr/git-backup
repository = https://lab.nexedi.com/kirr/git-backup.git
revision = 3f6c4deec8834bdcd2c28c7c5eeacd8211e759b5
[go_lab.nexedi.com_kirr_go123]
<= go-git-package
go.importpath = lab.nexedi.com/kirr/go123
repository = https://lab.nexedi.com/kirr/go123.git
revision = 56bf8f815a
instance-gitlab-export.cfg.in 0000664 0000000 0000000 00000006221 14241130220 0032414 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab # GitLab instance + site export support
[buildout]
extends = {{ instance_gitlab_cfg }}
# TODO + ${pbsready-export:output}
parts +=
cron-entry-gitlab-backup
resiliency-exclude-file
gitlab-resiliency-restore-script
# -export specific instance parameters
[instance-parameter]
# cron frequency for gitlab backup (default: every 4h)
configuration.backup_frequency = 0 */4 * * *
[gitlab-backup-directory]
recipe = slapos.cookbook:mkdirectory
srv = ${buildout:directory}/srv
backup = ${:srv}/backup
backup-gitlab.git = ${:backup}/backup-gitlab.git
var = ${buildout:directory}/var
pid = ${:var}/pid
# instance exporter script
[exporter]
recipe = slapos.cookbook:wrapper
wrapper-path = ${buildout:directory}/bin/gitlab-exporter
command-line = {{ xnice_repository_location }}/bin/xnice {{ gitlab_export }} ${gitlab-backup-directory:backup-gitlab.git}
pidfile = ${gitlab-backup-directory:pid}/gitlab-exporter.pid
environment =
# XXX: `/usr/bin` has to be in the PATH environment variable to be able to use
# `which` command in gitlab-backup, `chrt` in xnice, ...
# and `/bin` for `sed` command in gitlab-backup restore
PATH=${buildout:directory}/bin:{{ coreutils_location }}/bin:{{ grep_location }}/bin:{{ tar_location }}/bin:{{ gzip_location }}/bin:{{ gopath_bin }}:{{ git_location }}/bin:/bin:/usr/bin
[cron-entry-gitlab-backup]
<= cron-entry
# run backup script on a regular basis (given as instance parameter)
frequency = ${instance-parameter:configuration.backup_frequency}
command = ${exporter:wrapper-path}
[resiliency-exclude-file]
# Generate rdiff exclude file in case of resiliency
recipe = collective.recipe.template
input = inline: gitlab-shell-work*
gitlab-work*
var/log/**
var/backup/**
var/repositories*
var/repositories/**
srv/postgresql/**
srv/postgresql
etc/service/postgres-start
srv/redis/**
srv/unicorn/unicorn.socket
output = ${directory:srv}/exporter.exclude
[gitlab-resiliency-restore-script]
# script run by resilient stack to restore gitlab instance
# this section should be added only on runner import instance
recipe = slapos.recipe.template:jinja2
mode = 0700
url = {{ gitlab_restore_sh_in }}
output= ${directory:srv}/runner-import-restore
context =
raw bash_bin {{ bash_bin }}
raw go_work_bin {{ gopath_bin }}
raw git_location {{ git_location }}
raw bin_directory ${directory:bin}
raw etc_directory ${directory:etc}
raw run_directory ${directory:run}
raw postgress_script ${service-postgresql:services}/postgres-start
raw redis_script ${service-redis:wrapper}
raw unicorn_script ${service-unicorn:wrapper-path}
raw sidekiq_script ${service-sidekiq:wrapper-path}
raw gitlab_backup_dir ${gitlab-backup-directory:backup-gitlab.git}
raw redis_pid_file ${service-redis:pid-file}
raw postgres_pid_file ${service-postgresql:pgdata-directory}/postmaster.pid
raw gitlab_work_location ${gitlab-work:location}
raw promise_lab_location ${directory:promise.slow}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/instance-gitlab-test.cfg.in0000664 0000000 0000000 00000003203 14241130220 0032126 0 ustar 00root root 0000000 0000000 [buildout]
extends =
{{ instance_gitlab_export_cfg }}
parts +=
install-demo-backup
[root-password]
passwd = root1234
[service-unicorn]
environment =
GITLAB_ROOT_PASSWORD=${root-password:passwd}
[service-postgresql]
pgdata-directory = ${directory:srv}/pg
[gitlab-workhorse-dir]
recipe = slapos.cookbook:mkdirectory
srv = ${directory:srv}/glab-wh
[gitlab-workhorse]
srv = ${gitlab-workhorse-dir:srv}
socket = ${gitlab-workhorse:srv}/wh.socket
[unicorn]
socket = ${:srv}/unc.socket
[publish-instance-info]
password = ${root-password:passwd}
# token for default.user user in gitlab demo backup
# Edit this token if needed
private-token = SLurtnxPscPsU-SDm4oN
# raw URL for latest commit on setup.py in gitlab demo backup.
latest-file-uri = ${:backend_url}/open/slapos/raw/94c96d42c22e16836dadddac7c8061f4a8c6ca7a/setup.py
[instance-parameter]
# backup more often, 10 minutes seems the minimal
configuration.backup_frequency = */10 * * * *
[install-demo-backup]
recipe = plone.recipe.command
stop-on-error = false
backup-done = ${directory:var}/backup.ready
command =
if [ -f "${:backup-done}" ]; then
echo "Demo backup installed."
else
rm -rf ${secrets:secrets} ${directory:var}/backup/* &&
mkdir -p ${secrets:secrets} ${directory:var}/tmp &&
cp -r {{ gitlab_demo_backup_path }}/secrets/* ${secrets:secrets} &&
cp -rf {{ gitlab_demo_backup_path }}/backup-gitlab.git/ ${directory:var}/tmp &&
cd ${directory:var}/tmp/backup-gitlab.git/
PATH=${directory:bin}:{{ gopath_bin }}:{{ git_location }}/bin:$PATH
gitlab-backup restore -vupok -go HEAD &&
touch ${:backup-done}
fi
update-command = ${:command}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/instance-gitlab.cfg.in 0000664 0000000 0000000 00000060433 14241130220 0031161 0 ustar 00root root 0000000 0000000 # GitLab instance
# NOTE instance/software layout is inspired by gitlab omnibus
# NOTE all services are interconnected via unix sockets - for easier
# security and better performance (unix sockets have roughly half the latency
# and more throughput compared to TCP over loopback).
[buildout]
extends =
{{ monitor_template }}
parts =
directory
publish-instance-info
# gitlab-<prog>
# ? mailroom
{% set gitlab_progv = 'rails rake unicorn sidekiq unicorn-startup' .split() %}
{% for prog in gitlab_progv %}
gitlab-{{ prog }}
{% endfor %}
gitconfig
gitlab-work
gitlab-shell-work
service-gitlab-workhorse
service-unicorn
service-sidekiq
service-nginx
service-postgresql
service-redis
promise-redis
service-gitaly
cron-service
cron-entry-logrotate
logrotate-entry-cron
on-reinstantiate
# std stuff for slapos instance
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
##################################
# GitLab instance parameters #
##################################
[instance-parameter]
{#- There are dangerous keys like recipe, etc #}
{#- XXX: Some other approach would be useful #}
{%- set DROP_KEY_LIST = ['recipe', '__buildout_signature__', 'computer', 'partition', 'url', 'key', 'cert'] %}
{%- for key, value in instance_parameter_dict.iteritems() -%}
{%- if key not in DROP_KEY_LIST %}
{{ key }} = {{ value }}
{%- endif -%}
{%- endfor %}
# for convenience
[external-url]
recipe = slapos.cookbook:urlparse
url = ${instance-parameter:configuration.external_url}
[backend-info]
host = ${instance-parameter:ipv6-random}
port = 7777
# whether to use http or https - determined by external url
url = ${external-url:scheme}://[${:host}]:${:port}
# current slapuserX
user = {{ pwd.getpwuid(os.getuid())[0] }}
[publish-instance-info]
recipe = slapos.cookbook:publish
backend_url = ${backend-info:url}
#############################
# GitLab instance setup #
#############################
# 1. directories
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
bin = ${:home}/bin
etc = ${:home}/etc
var = ${:home}/var
log = ${:var}/log
run = ${:var}/run
srv = ${:home}/srv
# slapos startup/service/promise scripts live here:
startup = ${:etc}/run
service = ${:etc}/service
promise.slow = ${:etc}/promise.slow
# gitlab: etc/ log/ ...
[gitlab-dir]
recipe = slapos.cookbook:mkdirectory
etc = ${directory:etc}/gitlab
log = ${directory:log}/gitlab
var = ${directory:var}/gitlab
tmp = ${:var}/tmp
uploads = ${:var}/uploads
assets = ${:var}/assets
shared = ${:var}/shared
artifacts = ${:shared}/artifacts
lfs-objects = ${:shared}/lfs-objects
builds = ${:var}/builds
backup = ${directory:var}/backup
public = ${:var}/public
pages = ${:shared}/pages
[gitlab-repo-dir]
recipe = slapos.cookbook:mkdirectory
repositories = ${directory:var}/repositories
# gitlab wants it to be drwxrws---
# FIXME setting such mode with :mkdirectory is not possible, because mkdir(2)
# does & 0777 and there is also the umask. So we work around it:
[gitlab-repo-xdir]
recipe = plone.recipe.command
stop-on-error = yes
repositories = ${gitlab-repo-dir:repositories}
command = chmod 02770 ${:repositories}
[gitlab]
etc = ${gitlab-dir:etc}
log = ${gitlab-dir:log}
var = ${gitlab-dir:var}
tmp = ${gitlab-dir:tmp}
uploads = ${gitlab-dir:uploads}
assets = ${gitlab-dir:assets}
shared = ${gitlab-dir:shared}
artifacts = ${gitlab-dir:artifacts}
lfs-objects = ${gitlab-dir:lfs-objects}
builds = ${gitlab-dir:builds}
backup = ${gitlab-dir:backup}
repositories = ${gitlab-repo-xdir:repositories}
public = ${gitlab-dir:public}
pages = ${gitlab-dir:shared}/pages
# gitlab-shell: etc/ log/ gitlab_shell_secret ...
[gitlab-shell-dir]
recipe = slapos.cookbook:mkdirectory
etc = ${directory:etc}/gitlab-shell
log = ${directory:log}/gitlab-shell
[gitlab-shell]
etc = ${gitlab-shell-dir:etc}
log = ${gitlab-shell-dir:log}
secret = ${secrets:secrets}/gitlab_shell_secret
hook =
# place to keep all secrets
[secrets]
recipe = slapos.cookbook:mkdirectory
secrets = ${directory:var}/secrets
mode = 0700
[gitaly-dir]
recipe = slapos.cookbook:mkdirectory
gitaly = ${directory:var}/gitaly
sockets = ${:gitaly}/sockets
internal = ${:sockets}/internal
log = ${directory:log}/gitaly
[gitaly]
socket = ${gitaly-dir:sockets}/gitaly.socket
log = ${gitaly-dir:log}
location = {{ gitaly_location }}
pid = ${directory:run}/gitaly.pid
internal_socket = ${gitaly-dir:internal}
# 2. configuration files
[etc-template]
recipe = slapos.recipe.template:jinja2
extensions = jinja2.ext.do
mode = 0640
import-list =
rawfile macrolib.cfg.in {{ macrolib_cfg_in }}
context =
raw autogenerated # This file was autogenerated. (DO NOT EDIT - changes will be lost)
section instance_parameter instance-parameter
section backend_info backend-info
import urlparse urlparse
raw git {{ git }}
${:context-extra}
context-extra =
[gitlab-etc-template]
<= etc-template
output= ${gitlab:etc}/${:_buildout_section_name_}
[nginx-etc-template]
<= etc-template
output= ${nginx:etc}/${:_buildout_section_name_}
[database.yml]
<= gitlab-etc-template
url = {{ database_yml_in }}
context-extra =
section pgsql service-postgresql
[gitconfig]
<= etc-template
url = {{ gitconfig_in }}
# NOTE put directly into $HOME/ - this way git will pick it up
output= ${directory:home}/.${:_buildout_section_name_}
[gitlab-shell-config.yml]
<= etc-template
url = {{ gitlab_shell_config_yml_in }}
output= ${gitlab-shell:etc}/config.yml
context-extra =
import urllib urllib
section gitlab gitlab
section gitlab_shell gitlab-shell
section gitlab_shell_work gitlab-shell-work
section unicorn unicorn
section service_redis service-redis
raw redis_binprefix {{ redis_binprefix }}
[gitlab.yml]
<= gitlab-etc-template
url = {{ gitlab_yml_in }}
context-extra =
import urllib urllib
section gitlab gitlab
section gitlab_shell gitlab-shell
section gitlab_shell_work gitlab-shell-work
section gitaly gitaly
[nginx.conf]
<= nginx-etc-template
url = {{ nginx_conf_in }}
context-extra =
section directory directory
section gitlab_workhorse gitlab-workhorse
raw nginx_mime_types {{ nginx_mime_types }}
raw nginx_gitlab_http_conf ${nginx-gitlab-http.conf:output}
[nginx-gitlab-http.conf]
<= nginx-etc-template
url = {{ nginx_gitlab_http_conf_in }}
context-extra =
section nginx nginx
section gitlab_work gitlab-work
section gitlab_workhorse gitlab-workhorse
[gitaly-config.toml]
<= etc-template
url = {{ gitaly_config_toml_in }}
output= ${directory:etc}/${:_buildout_section_name_}
context-extra =
import urllib urllib
section gitlab gitlab
section gitlab_shell_work gitlab-shell-work
section gitaly gitaly
[rack_attack.rb]
<= gitlab-etc-template
url = {{ rack_attack_rb_in }}
[resque.yml]
<= gitlab-etc-template
url = {{ resque_yml_in }}
context-extra =
section redis service-redis
[smtp_settings.rb]
<= gitlab-etc-template
url = {{ smtp_settings_rb_in }}
# contains smtp password
mode = 0600
[unicorn.rb]
<= gitlab-etc-template
url = {{ unicorn_rb_in }}
context-extra =
section unicorn unicorn
section directory directory
section gitlab_work gitlab-work
# 3. bin/
# gitlab-<prog>
[gitlab-bin]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:bin}/${:_buildout_section_name_}
# NOTE $HOME needed to pick gitconfig
environment =
PATH = {{ node_bin_location }}:{{ gopath_bin }}:{{ yarn_location }}/bin:/usr/local/bin:/usr/bin:/bin
BUNDLE_GEMFILE = {{ gitlab_repository_location }}/Gemfile
HOME = ${directory:home}
RAILS_ENV = production
SIDEKIQ_MEMORY_KILLER_MAX_RSS = ${instance-parameter:configuration.sidekiq_memory_killer_max_rss}
command-line =
{{ bundler_4gitlab }} exec sh -c
'cd ${gitlab-work:location} && ${:prog} "$@"' ${:prog}
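# NOTE the trailing ${:prog} becomes $0 of the `sh -c` body, so "$@" expands
# to the wrapper's arguments while the program still sees its own name.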
{% for prog in gitlab_progv %}
[gitlab-{{ prog }}]
<= gitlab-bin
prog = {{ prog }}
{% endfor %}
[gitlab-unicorn-startup]
recipe = slapos.recipe.template:jinja2
mode = 0755
url = {{ gitlab_unicorn_startup_in }}
output= ${directory:bin}/${:_buildout_section_name_}
context =
raw bash_bin {{ bash_bin }}
raw gitlab_rake ${gitlab-rake:wrapper-path}
raw gitlab_unicorn ${gitlab-unicorn:wrapper-path}
raw psql_bin {{ postgresql_location }}/bin/psql
section pgsql service-postgresql
raw log_dir ${gitlab:log}
section unicorn_rb unicorn.rb
section gitlab_work gitlab-work
# 4. gitlab- & gitlab-shell- work directories
#
# Gitlab/Rails operation is tied to config/ living inside the code tree, which goes
# against having the ability to create several instances configured differently
# from 1 SR.
#
# One possibility to overcome this could be to make another Gitlab root
# symbolically linked to original SR _and_ several configuration files
# symbolically linked to instance place. Unfortunately this does not work -
# Ruby determines realpath on module import and Gitlab and Rails lookup config
# files relative to imported modules.
#
# we clone the already-cloned gitlab and add proper links to vendor/bundle and instance
# config files.
# XXX there is no need for full clone - we only need worktree checkout (a-la `git
# worktree add`, but without creating files in original clone)
#
# This way Gitlab/Rails still think they work in 1 code / 1 instance way,
# and we can reuse SR.
# XXX better do such tricks with bind mounting, but that requires user namespaces
[work-base]
recipe = plone.recipe.command
stop-on-error = yes
location = ${directory:home}/${:_buildout_section_name_}
command =
# make sure we start from well-defined empty state
# (needed e.g. if previous install failed in the middle)
rm -rf ${:location} &&
# init work repository and add `software` remote pointing to main repo in SR software/...
{{ git }} init ${:location} &&
cd ${:location} &&
{{ git }} remote add software ${:software} &&
${:update-command}
update-command =
cd ${:location} &&
{{ git }} fetch software &&
{{ git }} reset --hard `cd ${:software} && {{ git }} rev-parse HEAD` &&
${:tune-command}
# NOTE there is no need to link/create .gitlab_shell_secret - we set path to it
# in gitlab & gitlab-shell configs, and gitlab creates it on its first start
[gitlab-work]
<= work-base
software = {{ gitlab_repository_location }}
tune-command =
# Initialise secrets
if [ ! -s "${secrets:secrets}/gitlab_secrets.yml" ]; then
cp config/secrets.yml.example ${secrets:secrets}/gitlab_secrets.yml;
fi
# secret* tmp/ log/ shared/ builds/ node_modules/
rm -f .secret &&
rm -rf log tmp shared builds node_modules &&
ln -sf ${secrets:secrets}/gitlab_rails_secret .secret &&
ln -sf ${gitlab:log} log &&
ln -sf ${gitlab:tmp} tmp &&
ln -sf ${gitlab:shared} shared &&
ln -sf ${gitlab:builds} builds &&
ln -sf {{ gitlab_repository_location }}/node_modules node_modules &&
ln -sf ${gitlab-workhorse:secret} .gitlab_workhorse_secret
# config/
cd config &&
ln -sf ${unicorn.rb:output} unicorn.rb &&
ln -sf ${gitlab.yml:output} gitlab.yml &&
ln -sf ${database.yml:output} database.yml &&
ln -sf ${resque.yml:output} resque.yml &&
ln -sf ${secrets:secrets}/gitlab_secrets.yml secrets.yml &&
# config/initializers/
cd initializers &&
ln -sf ${rack_attack.rb:output} rack_attack.rb &&
ln -sf ${smtp_settings.rb:output} smtp_settings.rb &&
# public/
cd ../../public &&
rm -rf uploads assets &&
ln -sf ${gitlab:uploads} uploads &&
ln -sf ${gitlab:assets} assets &&
true
# ----//---- for gitlab-shell
[gitlab-shell-work]
<= work-base
software = {{ gitlab_shell_repository_location }}
tune-command =
ln -sf ${gitlab-shell-config.yml:output} config.yml &&
true
# 5. services
# [promise-] to check by url
[promise-byurl]
<= monitor-promise-base
promise = check_command_execute
name = ${:_buildout_section_name_}.py
config-http-code = 200
#####################
# Postgresql db #
#####################
# XXX gitlab-omnibus also tunes:
# - shared_buffers
# - work_mem
# - checkpoint_*
# - effective_cache_size
# - lc_* en_US.UTF-8 -> C (?)
[service-postgresql]
recipe = slapos.cookbook:postgres
bin = {{ postgresql_location }}/bin
services= ${directory:service}
dbname = gitlabhq_production
# NOTE db name must match what was used in KVM on lab.nexedi.com (restore script grants access to this user)
superuser = gitlab-psql
# no password - pgsql will listen only on unix sockets (see below) thus access
# is protected with filesystem-level permissions.
# ( besides, if we use slapos.cookbook:generate.password and do `password = ...`
# the password is stored in plain text in .installed and thus becomes insecure )
password=
pgdata-directory = ${directory:srv}/postgresql
# empty addresses - listen only on unix socket
ipv4 =
ipv6 =
port =
depend =
${promise-postgresql:recipe}
[promise-postgresql]
<= monitor-promise-base
promise = check_command_execute
name = promise-postgresql.py
config-command =
{{ postgresql_location }}/bin/psql \
-h ${service-postgresql:pgdata-directory} \
-U ${service-postgresql:superuser} \
-d ${service-postgresql:dbname} \
-c '\q'
# postgresql logs to stdout/stderr - logs are handled by slapos not us
# [logrotate-entry-postgresql]
#############
# Redis #
#############
[redis]
recipe = slapos.cookbook:mkdirectory
srv = ${directory:srv}/redis
log = ${directory:log}/redis
[service-redis]
recipe = slapos.cookbook:redis.server
wrapper = ${directory:service}/redis
promise-wrapper = ${directory:bin}/redis-promise
server-dir = ${redis:srv}
config-file = ${directory:etc}/redis.conf
log-file = ${redis:log}/redis.log
pid-file = ${directory:run}/redis.pid
use-passwd = false
unixsocket = ${:server-dir}/redis.socket
# port = 0 means "don't listen on TCP at all" - listen only on unix socket
ipv6 = ::1
port = 0
server-bin = {{ redis_binprefix }}/redis-server
cli-bin = {{ redis_binprefix }}/redis-cli
depend =
${logrotate-entry-redis:recipe}
[promise-redis]
<= monitor-promise-base
promise = check_command_execute
name = promise-redis.py
config-command = ${service-redis:promise-wrapper}
[logrotate-entry-redis]
<= logrotate-entry-base
log = ${redis:log}/*.log
name = redis
########################
# gitlab-workhorse #
########################
[gitlab-workhorse-dir]
recipe = slapos.cookbook:mkdirectory
srv = ${directory:srv}/gitlab-workhorse
log = ${directory:log}/workhorse
[gitlab-workhorse]
srv = ${gitlab-workhorse-dir:srv}
socket = ${gitlab-workhorse:srv}/gitlab-workhorse.socket
log = ${gitlab-workhorse-dir:log}/gitlab-workhorse.log
secret = ${secrets:secrets}/gitlab_workhorse_secret
[service-gitlab-workhorse]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:service}/gitlab-workhorse
command-line = {{ gitlab_workhorse }}
-listenNetwork unix
-listenAddr ${gitlab-workhorse:socket}
-authSocket ${unicorn:socket}
-documentRoot ${gitlab-work:location}/public
-secretPath ${gitlab-workhorse:secret}
-logFile ${gitlab-workhorse:log}
# NOTE for profiling
# -pprofListenAddr ...
# NOTE environment for:
# - git to be available on path
# - ruby to be available on path (gitlab-workhorse -> gitlab-shell -> hooks on push)
# - gitconfig be found from ~/.gitconfig
environment =
PATH={{ git_location }}/bin:{{ ruby_location }}/bin:{{ gzip_location }}/bin:{{ bzip2_location}}/bin
HOME=${directory:home}
depend =
${promise-gitlab-workhorse:recipe}
${logrotate-entry-gitlab-workhorse:recipe}
[promise-gitlab-workhorse]
<= promise-byurl
# http://localhost/users/statics.css will not redirect to /users/sign_in anymore because of this commit:
# https://lab.nexedi.com/nexedi/gitlab-workhorse/commit/c81f109a62fecf2a847fb17ceed012b380dab49f#c1215002e6d745f05eaaf9ee1dad7752e85d866f_318_331
config-command = {{ curl_bin }} --unix-socket ${gitlab-workhorse:socket} http://localhost/users/sign_in
# gitlab-workhorse logs to stdout/stderr - logs are handled by slapos not us
# [logrotate-entry-gitlab-workhorse]
######################
# unicorn worker #
######################
[unicorn-dir]
recipe = slapos.cookbook:mkdirectory
srv = ${directory:srv}/unicorn
log = ${directory:log}/unicorn
[unicorn]
srv = ${unicorn-dir:srv}
log = ${unicorn-dir:log}
socket = ${:srv}/unicorn.socket
[service-unicorn]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:service}/unicorn
# NOTE we perform db setup / migrations as part of unicorn startup.
# Those operations require PG and Redis to be up and running already, that's
# why we do it here. See gitlab-unicorn-startup for details.
command-line = ${gitlab-unicorn-startup:output}
depend =
${promise-unicorn:recipe}
${promise-gitlab-app:recipe}
${promise-gitlab-shell:recipe}
${logrotate-entry-unicorn:recipe}
# gitlab is a service "run" under unicorn
# gitlab-shell is called by gitlab
# -> associate their logs rotation to here
${logrotate-entry-gitlab:recipe}
[promise-unicorn]
<= promise-byurl
config-command = {{ curl_bin }} --unix-socket ${unicorn:socket} http://localhost/
[promise-rakebase]
recipe = slapos.cookbook:wrapper
# FIXME gitlab-rake is too slow to load and the promise times out
# that's why we instantiate to .slow/ (and this way promises are not run)
wrapper-path = !py!'${directory:promise.slow}/' + '${:_buildout_section_name_}'[8:]
rake = ${gitlab-rake:wrapper-path}
[promise-gitlab-app]
<= promise-rakebase
command-line = ${:rake} gitlab:app:check
[promise-gitlab-shell]
<= promise-rakebase
command-line = ${:rake} gitlab:gitlab_shell:check
# very slow
# rake gitlab:repo:check (fsck all repos)
[logrotate-entry-unicorn]
<= logrotate-entry-base
log = ${unicorn:log}/*.log
name = unicorn
[logrotate-entry-gitlab]
<= logrotate-entry-base
log = ${gitlab:log}/*.log
name = gitlab
[logrotate-entry-gitlab-shell]
<= logrotate-entry-base
log = ${gitlab-shell:log}/*.log
name = gitlab-shell
[logrotate-entry-gitlab-workhorse]
<= logrotate-entry-base
log = ${gitlab-workhorse-dir:log}/*.log
name = gitlab-workhorse
#######################################
# sidekiq background jobs manager #
#######################################
[sidekiq-dir]
recipe = slapos.cookbook:mkdirectory
log = ${directory:log}/sidekiq
[sidekiq]
log = ${sidekiq-dir:log}
# NOTE see queue list here:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/Procfile
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/sv-sidekiq-run.erb
# (last updated for omnibus-gitlab 8.8.9+ce.0-g25376053)
[service-sidekiq]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:service}/sidekiq
command-line =
# NOTE the Sidekiq memory killer makes sidekiq processes exit, or, if the exit
# request is not handled in time, SIGKILL-terminates them, and relies on the
# managing service to restart them. In slapos we have no mechanism to set
# autorestart=true, nor does bang/watchdog currently work with slapproxy, so
# we do the monitoring ourselves.
{{ watcher }} 0,SIGKILL
${gitlab-sidekiq:wrapper-path}
# XXX -q runner ? (present in gitlab-ce/Procfile but not in omnibus)
# XXX -q pages -q elasticsearch ? (present in omnibus but not in gitlab-ce -- those features are gitlab-ee only)
# XXX -P ? (pidfile)
-e production
-r ${gitlab-work:location}
-t ${instance-parameter:configuration.sidekiq_shutdown_timeout}
-c ${instance-parameter:configuration.sidekiq_concurrency}
-L ${sidekiq:log}/sidekiq.log
-C ${gitlab-work:location}/config/sidekiq_queues.yml
depend =
${promise-sidekiq:recipe}
${logrotate-entry-sidekiq:recipe}
[promise-sidekiq]
<= promise-rakebase
command-line = ${:rake} gitlab:sidekiq:check
[logrotate-entry-sidekiq]
<= logrotate-entry-base
log = ${sidekiq:log}/*.log
name = sidekiq
######################
# Nginx frontend #
######################
# srv/nginx/ prefix + etc/ log/ ...
[nginx-dir]
recipe = slapos.cookbook:mkdirectory
srv = ${directory:srv}/nginx
etc = ${directory:etc}/nginx
log = ${directory:log}/nginx
[nginx-ssl-dir]
recipe = slapos.cookbook:mkdirectory
ssl = ${nginx-dir:etc}/ssl
# contains https key
mode = 0700
# self-signed certificate for https
[nginx-generate-certificate]
# NOTE there is slapos.cookbook:certificate_authority.request but it requires
# starting a whole service and has up to 60 seconds of latency to generate the
# certificate. We only need to run 1 command to do it...
recipe = plone.recipe.command
stop-on-error = true
cert_file = ${nginx-ssl-dir:ssl}/gitlab_backend.crt
key_file = ${nginx-ssl-dir:ssl}/gitlab_backend.key
command =
test -e ${:key_file} || \
{{ openssl_bin }} req -newkey rsa -batch -new -x509 -days 3650 -nodes \
-keyout ${:key_file} -out ${:cert_file}
update-command = ${:command}
[nginx]
srv = ${nginx-dir:srv}
etc = ${nginx-dir:etc}
log = ${nginx-dir:log}
ssl = ${nginx-ssl-dir:ssl}
cert_file = ${nginx-generate-certificate:cert_file}
key_file = ${nginx-generate-certificate:key_file}
[nginx-symlinks]
# (nginx wants logs/ to be there from the start - else it issues an alarm to the log)
recipe = cns.recipe.symlink
symlink = ${nginx:log} = ${nginx:srv}/logs
[service-nginx]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:service}/nginx
command-line = {{ nginx_bin }} -p ${nginx:srv} -c ${nginx.conf:output}
depend =
${nginx-symlinks:recipe}
${promise-nginx:recipe}
${logrotate-entry-nginx:recipe}
[promise-nginx]
<= promise-byurl
# XXX this depends on gitlab-workhorse being up
# (nginx is configured to proxy all requests to gitlab-workhorse)
config-url = ${backend-info:url}/users/sign_in
promise = check_url_available
[logrotate-entry-nginx]
<= logrotate-entry-base
log = ${nginx:log}/*.log
name = nginx
# base entry for clients who registers to cron
[cron-entry]
recipe = slapos.cookbook:cron.d
# name = <section-name>.strip_prefix('cron-entry-')
# XXX len() is not available in !py! - 11 hardcoded
name = !py!'${:_buildout_section_name_}' [11:]
# NOTE _not_ ${service-cron:cron-entries} - though the value is the same we do
# not want service-cron to be instantiated just if a cron-entry is registered.
cron-entries = ${cron:cron-entries}
######################
# gitaly worker #
######################
# https://docs.gitlab.com/ee/install/installation.html
[service-gitaly]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:service}/gitaly
#command-line = ${gitlab-work:location}/bin/daemon_with_pidfile ${gitaly:pid}
command-line = {{ gitaly_location }}/gitaly ${gitaly-config.toml:output}
environment =
PATH={{ bundler_1_17_3_dir }}:{{ ruby_location }}/bin:/bin:/usr/bin
# 6. on-reinstantiate actions
# NOTE here we only recompile assets. Other on-reinstantiate actions, which
# require pg and redis running, are performed as part of unicorn service -
# right before its startup (see gitlab-unicorn-startup).
[on-reinstantiate]
recipe = plone.recipe.command
stop-on-error = true
rake = ${gitlab-rake:wrapper-path}
# run command on every reinstantiation
update-command = ${:command}
# https://gitlab.com/gitlab-org/gitlab-foss/issues/38457
# we need to manually install ajv@^4.0.0 with yarn to fix the bug 'yarn check failed!'
command =
${:rake} gitlab:assets:clean &&
${:rake} gettext:compile RAILS_ENV=production &&
cd ${gitlab-work:location} &&
PATH={{ node_bin_location }}:$PATH {{ yarn_location }}/bin/yarn add ajv@^4.11.2 &&
PATH={{ node_bin_location }}:$PATH {{ yarn_location }}/bin/yarn install --production --pure-lockfile &&
${:rake} gitlab:assets:compile NODE_ENV=production NODE_OPTIONS="--max_old_space_size=4096" &&
true
# Promise, gitlab can connect to gitaly:
# sudo gitlab-rake gitlab:tcp_check[GITALY_SERVER_IP,GITALY_LISTEN_PORT]
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/instance.cfg.in 0000664 0000000 0000000 00000012534 14241130220 0027720 0 ustar 00root root 0000000 0000000 # GitLab "switch-softwaretype" instance
[buildout]
extends =
${gitlab-parameters.cfg:target}
parts =
switch-softwaretype
# std stuff for slapos instance
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
gitlab = instance-gitlab.cfg:output
gitlab-export = instance-gitlab-export.cfg:output
gitlab-test = instance-gitlab-test.cfg:output
RootSoftwareInstance = $${:gitlab}
# TODO -import, -pull-backup
[worker-processes]
recipe = slapos.recipe.build
init =
import multiprocessing
cpu_count = multiprocessing.cpu_count()
# automatically load all available CPUs
options['unicorn-worker-processes'] = cpu_count + 1
options['nginx-worker-processes'] = cpu_count
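# e.g. on a 4-CPU machine this sets unicorn-worker-processes = 5
# and nginx-worker-processes = 4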
[slap-configuration]
# std stuff to fetch slapos instance parameters
recipe = slapos.cookbook:slapconfiguration
computer= $${slap-connection:computer-id}
partition=$${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
# autogenerated gitlab instance parameters
<= gitlab-parameters
# adjust/override some default settings:
configuration.unicorn_worker_processes = $${worker-processes:unicorn-worker-processes}
configuration.nginx_worker_processes = $${worker-processes:nginx-worker-processes}
# gitlab non-native parameters
configuration.icp_license =
# macro: render instance-*.cfg from instance-*.cfg.in
[instance-cfg]
recipe = slapos.recipe.template:jinja2
mode = 0644
output= $${buildout:directory}/$${:_buildout_section_name_}
context =
import os os
import pwd pwd
key bin_directory buildout:bin-directory
key eggs_directory buildout:eggs-directory
key develop_eggs_directory buildout:develop-eggs-directory
raw gitlab_repository_location ${gitlab-repository:location}
raw gitlab_shell_repository_location ${gitlab-shell-repository:location}
section instance_parameter_dict slap-configuration
# program binaries
raw bash_bin ${bash:location}/bin/bash
raw bzip2_location ${bzip2:location}
raw bundler_4gitlab ${bundler-4gitlab:bundle}
raw bundler_1_17_3_dir ${bundler-4gitlab:bundle1.17.3}
raw coreutils_location ${coreutils:location}
raw curl_bin ${curl:location}/bin/curl
raw dcron_bin ${dcron-output:crond}
raw git ${git:location}/bin/git
raw git_location ${git:location}
raw gitaly_location ${gitaly-repository:location}
raw gitlab_export ${gitlab-export:output}
raw gitlab_workhorse ${gowork:bin}/gitlab-workhorse
raw gopath_bin ${gowork:bin}
raw gunzip_bin ${gzip:location}/bin/gunzip
raw grep_location ${grep:location}
raw gzip_bin ${gzip:location}/bin/gzip
raw gzip_location ${gzip:location}
raw logrotate_bin ${logrotate:location}/usr/sbin/logrotate
raw nginx_bin ${nginx-output:nginx}
raw nginx_mime_types ${nginx-output:mime}
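# (i.e. 300 MiB min and 350 MiB max per unicorn worker)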
raw node_bin_location ${nodejs-8.12.0:location}/bin/
raw openssl_bin ${openssl-output:openssl}
raw postgresql_location ${postgresql10:location}
raw redis_binprefix ${redis28:location}/bin
raw ruby_location ${bundler-4gitlab:ruby-location}
raw tar_location ${tar:location}
raw watcher ${watcher:output}
raw xnice_repository_location ${xnice-repository:location}
raw yarn_location ${yarn:location}
# config files
raw database_yml_in ${database.yml.in:target}
raw gitconfig_in ${gitconfig.in:target}
raw monitor_template ${monitor2-template:output}
raw gitlab_shell_config_yml_in ${gitlab-shell-config.yml.in:target}
raw gitlab_unicorn_startup_in ${gitlab-unicorn-startup.in:target}
raw gitlab_yml_in ${gitlab.yml.in:target}
raw gitaly_config_toml_in ${gitaly-config.toml.in:target}
raw macrolib_cfg_in ${macrolib.cfg.in:target}
raw nginx_conf_in ${nginx.conf.in:target}
raw nginx_gitlab_http_conf_in ${nginx-gitlab-http.conf.in:target}
raw rack_attack_rb_in ${rack_attack.rb.in:target}
raw resque_yml_in ${resque.yml.in:target}
raw smtp_settings_rb_in ${smtp_settings.rb.in:target}
raw gitlab_restore_sh_in ${template-gitlab-resiliency-restore.sh.in:target}
raw unicorn_rb_in ${unicorn.rb.in:target}
$${:context-extra}
context-extra =
[instance-gitlab.cfg]
<= instance-cfg
url = ${instance-gitlab.cfg.in:target}
[instance-gitlab-export.cfg]
<= instance-cfg
url = ${instance-gitlab-export.cfg.in:target}
context-extra =
raw instance_gitlab_cfg $${instance-gitlab.cfg:output}
[instance-gitlab-test.cfg]
<= instance-cfg
url = ${instance-gitlab-test.cfg.in:target}
context-extra =
raw instance_gitlab_cfg $${instance-gitlab.cfg:output}
raw instance_gitlab_export_cfg $${instance-gitlab-export.cfg:output}
raw gitlab_demo_backup_path ${gitlab-demo-backup.git:location}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/macrolib.cfg.in 0000664 0000000 0000000 00000001431 14241130220 0027676 0 ustar 00root root 0000000 0000000 {# common macros for gitlab instance #}
{# cfg(name) -> instance_parameter:configuration.<name> #}
{% macro cfg(name) %}{{ instance_parameter[str("configuration." + name)] }}{% endmacro %}
{# cfg_bool(name) - like cfg(name), but returns 'true'/''
NOTE macros can return only strings - that's why '' is used for false #}
{% macro cfg_bool(name) %}{{ 'true' if (cfg(name).lower() in ('true', 'yes')) else '' }}{% endmacro %}
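{# e.g. with the upstream defaults from gitlab-parameters.cfg:
   cfg('db_pool') -> '10', cfg_bool('email_enabled') -> 'true' #}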
{# deduce whether to use https from external url
( here - because we cannot use jinja2 logic in instance-gitlab.cfg.in to
process instance parameters ) #}
{% set external_url = urlparse.urlparse(cfg('external_url')) %}
{% set cfg_https = (true if external_url.scheme == 'https' else false) %}
{# for convenience #}
{% set fqdn = external_url.hostname %}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/software.cfg 0000664 0000000 0000000 00000027315 14241130220 0027344 0 ustar 00root root 0000000 0000000 # GitLab software-release
[buildout]
extends =
buildout.hash.cfg
../../stack/slapos.cfg
../../stack/nodejs.cfg
../../stack/monitor/buildout.cfg
../../component/ruby/buildout.cfg
../../component/golang/buildout.cfg
../../component/postgresql/buildout.cfg
../../component/redis/buildout.cfg
../../component/cmake/buildout.cfg
../../component/icu/buildout.cfg
../../component/pkgconfig/buildout.cfg
../../component/openssl/buildout.cfg
../../component/nginx/buildout.cfg
../../component/xz-utils/buildout.cfg
../../component/zlib/buildout.cfg
gowork.cfg
# for instance
../../component/coreutils/buildout.cfg
../../component/bash/buildout.cfg
../../component/grep/buildout.cfg
../../component/bzip2/buildout.cfg
../../component/curl/buildout.cfg
../../component/tar/buildout.cfg
../../component/gzip/buildout.cfg
../../component/dcron/buildout.cfg
../../component/logrotate/buildout.cfg
parts =
ruby2.3
golang1.12
git
postgresql10
redis28
cmake
icu
pkgconfig
nginx-output
gowork
gitlab-workhorse
gitaly-build
python-4gitlab
gitlab-shell/vendor
gitlab/vendor/bundle
gitlab_npm
github-markup-patch
gitlab-backup
# for instance
instance.cfg
slapos-cookbook
eggs
bash
curl
watcher
gitlab-export
gzip
dcron-output
logrotate
[slapos.cookbook-repository]
revision = 571d6514f7290e8faa9439c4b86aa2f6c87df261
[nodejs]
<= nodejs-8.12.0
[yarn]
<= yarn-1.3.2
############################
# Software compilation #
############################
# python with eggs, which will be used in gitlab
[python-4gitlab]
recipe = zc.recipe.egg
interpreter = python2
eggs =
docutils
# rubygemsrecipe with a fixed url, and this way a pinned rubygems version
[rubygemsrecipe]
recipe = rubygemsrecipe
url = https://rubygems.org/rubygems/rubygems-3.1.2.zip
# bundler, which we'll use to
# - install gems for gitlab
# - run gitlab services / jobs (via `bundle exec ...`)
[bundler-4gitlab]
<= rubygemsrecipe
ruby-location = ${ruby2.3:location}
ruby-executable = ${:ruby-location}/bin/ruby
gems =
bundler==1.17.3
# bin installed here
bundle = ${buildout:bin-directory}/bundle
# Gitaly needs bundler 1.17.3, which is not the version installed by default
bundle1.17.3 = ${buildout:parts-directory}/${:_buildout_section_name_}/lib/ruby/gems/1.8/gems/bundler-1.17.3/exe/
# install together with dependencies of gitlab, which we cannot specify using
# --with-... gem option
# ( reason: rubygemsrecipe hardcodes PATH inside generated bin/* and it is
# impossible to adjust it later )
#
# bundle exec: generated scripts start with `#!/usr/bin/env ruby`, as rubygems scripts do
# Rugged needs: cmake, pkgconfig
# execjs needs: nodejs
# rails needs db client program on path: psql
# gitlab wants to check redis version via running: redis-cli
# gitlab wants git to really be on PATH ( it uses git from the abspath defined
# in gitlab.yml, but not all code paths do so, e.g. in
# https://gitlab.com/gitlab-org/gitlab_git/blob/2f0d3c1a/lib/gitlab_git/repository.rb#L259 )
# gitlab (via github-markup) wants to convert rst -> html via running: python2 (with docutils egg)
# (python-4gitlab puts interpreter into ${buildout:bin-directory})
environment =
PATH = ${yarn:location}/bin:${:ruby-location}/bin:${cmake:location}/bin:${pkgconfig:location}/bin:${nodejs:location}/bin:${postgresql10:location}/bin:${redis28:location}/bin:${git:location}/bin:${buildout:bin-directory}:%(PATH)s
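# Example (sketch, hypothetical invocation): with the PATH above, instance
# code can run e.g.
#   bin/bundle exec rake gitlab:check RAILS_ENV=production
# and have ruby, psql, redis-cli, git, node and python2 all resolved from the
# pinned component locations rather than from the system.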
# gitlab, gitlab-shell & gitlab-workhorse checked out as git repositories
# pinned to exact commit
[git-repository]
recipe = slapos.recipe.build:gitclone
git-executable = ${git:location}/bin/git
[gitlab-repository]
<= git-repository
repository = https://lab.nexedi.com/nexedi/gitlab-ce.git
# 9.5.10 + NXD patches:
revision = v9.5.10-9-g69b0ffae00bf
location = ${buildout:parts-directory}/gitlab
[gitlab-shell-repository]
<= git-repository
#repository = https://lab.nexedi.com/nexedi/gitlab-shell.git
repository = https://gitlab.com/gitlab-org/gitlab-shell.git
# gitlab 9.5.10 wants gitlab-shell 5.6.1
revision = v5.6.1-10-g1e587d3b7f
location = ${buildout:parts-directory}/gitlab-shell
[gitaly-repository]
<= git-repository
repository = https://gitlab.com/gitlab-org/gitaly.git
# for version v0.35.0 (gitlab 9.5.10)
revision = v0.35.0-0-gf99a57b19a
location = ${buildout:parts-directory}/gitaly
[gitlab-workhorse-repository]
<= git-repository
repository = https://lab.nexedi.com/nexedi/gitlab-workhorse.git
revision = v3.0.0-8-g74793ad3cc
# Patch github markup to not call "python2 -S /path/to/rest2html" but only "python2 /path/to/rest2html"
# NOTE github-markup invokes it as `python2`, that's why we are naming it this way
# https://github.com/github/markup/blob/5393ae93/lib/github/markups.rb#L36
[github-markup-patch]
recipe = plone.recipe.command
command =
files=$(ls ${gitlab-repository:location}/vendor/bundle/ruby/*/gems/git*-markup-*/lib/github/markups.rb) || true
if [ ! -z "$files" ]; then
for file in $files; do
sed -i 's#python2 -S#python2#' $file
done
fi
update-command = ${:command}
stop-on-error = True
# build needed-by-gitlab gems via bundler
[gitlab/vendor/bundle]
recipe = slapos.recipe.cmmi
path = ${gitlab-repository:location}
bundle = ${bundler-4gitlab:bundle}
configure-command = cd ${:path} &&
${:bundle} config --local build.charlock_holmes --with-icu-dir=${icu:location} &&
${:bundle} config --local build.pg --with-pg-config=${postgresql10:location}/bin/pg_config &&
${:bundle} config --local build.re2 --with-re2-dir=${re2:location} &&
${:bundle} config --local build.nokogiri --with-zlib-dir=${zlib:location} --with-cflags=-I${xz-utils:location}/include --with-ldflags="-L${xz-utils:location}/lib -Wl,-rpath=${xz-utils:location}/lib"
make-binary =
make-targets= cd ${:path} &&
${:bundle} install --deployment --without development test mysql aws kerberos
environment =
PKG_CONFIG_PATH=${openssl-1.0:location}/lib/pkgconfig:${re2:location}/lib/pkgconfig:${xz-utils:location}/lib/pkgconfig
PATH=${pkgconfig:location}/bin:%(PATH)s
CFLAGS=-I${xz-utils:location}/include
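# Note: `bundle install --deployment` installs the gems into
# ${gitlab-repository:location}/vendor/bundle (hence this part's name) and
# uses the Gemfile.lock checked into the gitlab repository verbatim.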
################## Google re2
[re2]
recipe = slapos.recipe.cmmi
url = https://github.com/google/re2/archive/2019-12-01.tar.gz
md5sum = 527eab0c75d6a1a0044c6eefd816b2fb
configure-command = :
[gitlab_npm]
recipe = slapos.recipe.cmmi
path = ${gitlab-repository:location}
configure-command = :
make-binary =
make-targets= cd ${:path} && npm install
environment =
PATH=${nodejs:location}/bin/:%(PATH)s
# our go infrastructure does not currently support submodules, IIRC
# https://lab.nexedi.com/nexedi/slapos/merge_requests/337
[go_github.com_libgit2_git2go_prepare]
recipe = slapos.recipe.cmmi
path = ${go_github.com_libgit2_git2go:location}
configure-command = :
make-binary =
make-targets= cd ${go_github.com_libgit2_git2go:location}
&& git submodule update --init
&& sed -i 's/.*--build.*/cmake --build . --target install/' script/build-libgit2-static.sh
&& make install
environment =
PKG_CONFIG_PATH=${openssl-1.0:location}/lib/pkgconfig:${zlib:location}/lib/pkgconfig
PATH=${cmake:location}/bin:${pkgconfig:location}/bin:${git:location}/bin:${golang1.12:location}/bin:${buildout:bin-directory}:%(PATH)s
GOPATH=${gowork:directory}
[gowork.goinstall]
git2go = ${go_github.com_libgit2_git2go_prepare:path}/vendor/libgit2/install
command = bash -c ". ${gowork:env.sh} && CGO_CFLAGS=-I${:git2go}/include CGO_LDFLAGS='-L${:git2go}/lib -lgit2' go install ${gowork:buildflags} -v $(echo -n '${gowork:install}' |tr '\n' ' ')"
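# (sketch) the command above expands to roughly:
#   . <gowork>/env.sh
#   CGO_CFLAGS=-I<git2go-install>/include \
#   CGO_LDFLAGS='-L<git2go-install>/lib -lgit2' \
#   go install --tags "static" -v <packages listed in gowork:install>
# i.e. cgo links git2go statically against the libgit2 built by the
# _prepare part instead of a system libgit2.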
[gowork]
golang = ${golang1.12:location}
# gitlab.com/gitlab-org/gitlab-workhorse
# gitlab.com/gitlab-org/gitlab-workhorse/cmd/gitlab-zip-cat
# gitlab.com/gitlab-org/gitlab-workhorse/cmd/gitlab-zip-metadata
install =
lab.nexedi.com/kirr/git-backup
cpkgpath =
${openssl-1.0:location}/lib/pkgconfig
${zlib:location}/lib/pkgconfig
${go_github.com_libgit2_git2go_prepare:path}/vendor/libgit2/install/lib/pkgconfig
buildflags = --tags "static"
[gitlab-workhorse]
recipe = slapos.recipe.cmmi
path = ${gitlab-workhorse-repository:location}
md5sum = 2988c944d58c4a08880498c4981cc7b7
configure-command = :
make-binary =
make-targets =
. ${gowork:env.sh} && make install PREFIX=${gowork:directory}
[gitlab-backup]
recipe = plone.recipe.command
command =
cp -a ${go_lab.nexedi.com_kirr_git-backup:location}/contrib/gitlab-backup ${gowork:bin}
update-command = ${:command}
[gitaly-build]
recipe = slapos.recipe.cmmi
path = ${gitaly-repository:location}
bundle = ${bundler-4gitlab:bundle}
configure-command = cd ${:path}/ruby &&
${:bundle} config --local build.charlock_holmes --with-icu-dir=${icu:location}
make-binary =
make-targets =
. ${gowork:env.sh} &&
unset GOBIN &&
make
environment =
PKG_CONFIG_PATH=${openssl-1.0:location}/lib/pkgconfig:${icu:location}/lib/pkgconfig
PATH=${pkgconfig:location}/bin:${ruby2.3:location}/bin:%(PATH)s
[xnice-repository]
# to get kirr's misc repo containing xnice script for executing processes
# with lower priority (used for backup script inside the cron)
<= git-repository
repository = https://lab.nexedi.com/kirr/misc.git
revision = 4073572ea700bf1b115f3a135aebebe5b3b824e4
location = ${buildout:parts-directory}/misc
# build needed-by-gitlab-shell gems via bundler
# ( there is no vendor/ dir in gitlab-shell, so to avoid a buildout error
# on mkdir vendor/bundle, this part name is just /vendor )
[gitlab-shell/vendor]
recipe = slapos.recipe.cmmi
path = ${gitlab-shell-repository:location}
bundle = ${bundler-4gitlab:bundle}
configure-command = true
make-binary =
make-targets= cd ${:path} &&
${:bundle} install --deployment --without development test
###############################
# Trampoline for instance #
###############################
# eggs for instance.cfg
[eggs]
recipe = zc.recipe.egg
eggs =
plone.recipe.command
cns.recipe.symlink
collective.recipe.template
[instance.cfg]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/instance.cfg
# macro: download a shell script and put it rendered into /bin/
[binsh]
recipe = slapos.recipe.template:jinja2
url = ${:_profile_base_location_}/${:_update_hash_filename_}
output= ${buildout:bin-directory}/${:_buildout_section_name_}
mode = 0755
context =
section bash bash
[watcher]
<= binsh
[gitlab-export]
<= binsh
# macro: download a file named in buildout.hash.cfg via _update_hash_filename_
#
# [filename]
# <= download-file
[download-file]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
destination = ${buildout:directory}/${:_buildout_section_name_}
[database.yml.in]
<= download-file
[gitconfig.in]
<= download-file
[gitlab-parameters.cfg]
<= download-file
[gitlab-shell-config.yml.in]
<= download-file
[gitlab-unicorn-startup.in]
<= download-file
[gitlab.yml.in]
<= download-file
[gitaly-config.toml.in]
<= download-file
[instance-gitlab.cfg.in]
<= download-file
[instance-gitlab-export.cfg.in]
<= download-file
[instance-gitlab-test.cfg.in]
<= download-file
[macrolib.cfg.in]
<= download-file
[nginx-gitlab-http.conf.in]
<= download-file
[nginx.conf.in]
<= download-file
[rack_attack.rb.in]
<= download-file
[resque.yml.in]
<= download-file
[smtp_settings.rb.in]
<= download-file
[template-gitlab-resiliency-restore.sh.in]
<= download-file
[unicorn.rb.in]
<= download-file
[gitlab-demo-backup.git]
recipe = slapos.recipe.build:download-unpacked
url = https://lab.nexedi.com/alain.takoudjou/labdemo.backup/repository/archive.tar.gz?ref=master
md5sum = d40e5e211dc9a4e5ada9c0250377c639
[versions]
cns.recipe.symlink = 0.2.3
docutils = 0.12
plone.recipe.command = 1.1
z3c.recipe.scripts = 1.0.1
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/ 0000775 0000000 0000000 00000000000 14241130220 0026634 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/database.yml.in 0000664 0000000 0000000 00000001762 14241130220 0031536 0 ustar 00root root 0000000 0000000 {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/database.yml.postgresql
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/database.yml.erb
# (last updated for 8.7.9+ce.1-0-gf589ad7)
{% from 'macrolib.cfg.in' import cfg with context %}
production:
adapter: postgresql
encoding: unicode
{# collation is mainly for mysql
collation: <%= @db_collation %>
#}
database: {{ pgsql.dbname }}
pool: {{ cfg('db_pool') }}
{# XXX is it ok to use superuser, even if the whole database is only for gitlab? #}
username: '{{ pgsql.superuser }}'
{# we have no password - access is via unix socket #}
password:
host: '{{ pgsql["pgdata-directory"] }}'
port:
socket:
{# not needed for unix socket
sslmode: <%= single_quote(@db_sslmode) %>
sslrootcert: <%= single_quote(@db_sslrootcert) || single_quote(@db_sslca) %>
sslca: <%= single_quote(@db_sslca) || single_quote(@db_sslrootcert) %>
#}
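{# (illustration, hypothetical values) the same unix-socket connection, done
   manually, would be e.g.:
     psql -h <pgdata-directory> <dbname>
   libpq treats a host value starting with '/' as a socket directory #}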
gitaly-config.toml.in 0000664 0000000 0000000 00000002663 14241130220 0032622 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template # Example Gitaly configuration file
# Documentation lives at https://docs.gitlab.com/ee/administration/gitaly/ and
# https://docs.gitlab.com/ee//administration/gitaly/reference
socket_path = "{{ gitaly.socket }}"
# The directory where Gitaly's executables are stored
bin_dir = "{{ gitaly.location }}"
# # Optional: listen on a TCP socket. This is insecure (no authentication)
# listen_addr = "localhost:9999"
# tls_listen_addr = "localhost:8888"
# # Optional: export metrics via Prometheus
# prometheus_listen_addr = "localhost:9236"
# # Git settings
[git]
bin_path = "{{ git }}"
[[storage]]
name = "default"
path = "{{ gitlab.repositories }}"
# # You can optionally configure more storages for this Gitaly instance to serve up
#
# [[storage]]
# name = "other_storage"
# path = "/mnt/other_storage/repositories"
#
# # You can optionally configure Gitaly to output JSON-formatted log messages to stdout
# [logging]
# format = "json"
# # Additionally exceptions can be reported to Sentry
# # sentry_dsn = "https://<key>:<secret>@sentry.io/<project>"
# # You can optionally configure Gitaly to record histogram latencies on GRPC method calls
# [prometheus]
# grpc_latency_buckets = [0.001, 0.005, 0.025, 0.1, 0.5, 1.0, 10.0, 30.0, 60.0, 300.0, 1500.0]
[gitaly-ruby]
# The directory where gitaly-ruby is installed
dir = "{{ gitaly.location }}/ruby"
[gitlab-shell]
# The directory where gitlab-shell is installed
dir = "{{ gitlab_shell_work.location }}"
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/gitconfig.in 0000664 0000000 0000000 00000001416 14241130220 0031137 0 ustar 00root root 0000000 0000000 {{ autogenerated }}
# global git configuration for GitLab
# see:
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/attributes/default.rb
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/gitconfig.erb
# (last updated for omnibus-gitlab 8.7.9+ce.1-0-gf589ad7)
#
{% from 'macrolib.cfg.in' import cfg with context %}
# don't waste memory when packing (each thread uses own work memory)
# besides it packs better with 1 thread
[pack]
threads = 1
# don't allow corrupt/broken objects to go in
[receive]
fsckObjects = true
[user]
name = {{ cfg('email_display_name') }}
email = {{ cfg('email_from') }}
[core]
autocrlf = input
[gc]
auto = 0
gitlab-shell-config.yml.in 0000664 0000000 0000000 00000004761 14241130220 0033527 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-shell/blob/master/config.yml.example
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/gitlab-shell-config.yml.erb
# (last updated for omnibus-gitlab 8.7.9+ce.1-0-gf589ad7)
# GitLab user. git by default
user: {{ backend_info.user }}
# Url to gitlab instance. Used for api calls. Should end with a slash.
gitlab_url: "http+unix://{{ urllib.quote_plus(unicorn.socket) }}/"
http_settings:
{# we don't need any
<%= @http_settings.to_json if @http_settings %>
#}
# user: someone
# password: somepass
# ca_file: /etc/ssl/cert.pem
# ca_path: /etc/pki/tls/certs
# self_signed_cert: false
# Repositories path
# Give the canonicalized absolute pathname,
# REPOS_PATH MUST NOT CONTAIN ANY SYMLINK!!!
# Check twice that none of the components is a symlink, including "/home".
# repos_path: "{{ gitlab.repositories }}"
# File used as authorized_keys for gitlab user
# NOTE not used in slapos version (all access via https only)
auth_file: "{{ gitlab.var }}/sshkeys-notused"
# File that contains the secret key for verifying access to GitLab.
# Default is .gitlab_shell_secret in the root directory.
secret_file: "{{ gitlab_shell.secret }}"
# Parent directory for global custom hook directories (pre-receive.d, update.d, post-receive.d)
# Default is hooks in the gitlab-shell directory.
custom_hooks_dir: "{{ gitlab_shell_work.location }}/hooks/"
# Redis settings used for pushing commit notices to gitlab
redis:
bin: {{ redis_binprefix }}/redis-cli
host: {# <%= @redis_host %> #}
port: {# <%= @redis_port %> #}
socket: {{ service_redis.unixsocket }}
database: {# <%= @redis_database %> #}
namespace: resque:gitlab
# Log file.
# Default is gitlab-shell.log in the root directory.
log_file: "{{ gitlab_shell.log }}/gitlab-shell.log"
# Log level. INFO by default
log_level:
# Audit usernames.
# Set to true to see real usernames in the logs instead of key ids, which is easier to follow, but
# incurs an extra API call on every gitlab-shell command.
audit_usernames:
# Enable git-annex support
# git-annex allows managing files with git, without checking the file contents into git
# See https://git-annex.branchable.com/ for documentation
# If enabled, git-annex needs to be installed on the server where gitlab-shell is setup
# For Debian and Ubuntu systems this can be done with: sudo apt-get install git-annex
# For CentOS: sudo yum install epel-release && sudo yum install git-annex
git_annex_enabled:
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/gitlab.yml.in 0000664 0000000 0000000 00000053036 14241130220 0031235 0 ustar 00root root 0000000 0000000 {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/gitlab.yml.example
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/gitlab.yml.erb
# (last updated for omnibus-gitlab 8.8.9+ce.0-g25376053)
{% from 'macrolib.cfg.in' import cfg, cfg_https, external_url with context %}
production: &base
#
# 1. GitLab app settings
# ==========================
## GitLab settings
gitlab:
## Web server settings (note: host is the FQDN, do not include http://)
{% set default_port = {'http': 80, 'https': 443} %}
host: {{ external_url.hostname }}
port: {{ external_url.port or default_port[external_url.scheme] }}
https: {{ cfg_https }}
{# ssh is disabled completely in slapos version
# Uncommment this line below if your ssh host is different from HTTP/HTTPS one
# (you'd obviously need to replace ssh.host_example.com with your own host).
# Otherwise, ssh host will be set to the `host:` value above
ssh_host: <%= @gitlab_ssh_host %>
#}
# WARNING: See config/application.rb under "Relative url support" for the list of
# other files that need to be changed for relative url support
{# we do not support relative URL
relative_url_root: <%= @gitlab_relative_url %>
#}
# Content Security Policy
# See https://guides.rubyonrails.org/security.html#content-security-policy
content_security_policy:
enabled: true
report_only: false
directives:
base_uri:
child_src:
connect_src: "'self' http://localhost:* ws://localhost:* wss://localhost:*"
default_src: "'self'"
font_src:
form_action:
frame_ancestors: "'self'"
frame_src: "'self' https://www.google.com/recaptcha/ https://www.recaptcha.net/ https://content.googleapis.com https://content-compute.googleapis.com https://content-cloudbilling.googleapis.com https://content-cloudresourcemanager.googleapis.com"
img_src: "* data: blob:"
manifest_src:
media_src:
object_src: "'none'"
script_src: "'self' 'unsafe-eval' http://localhost:* https://www.google.com/recaptcha/ https://www.recaptcha.net/ https://www.gstatic.com/recaptcha/ https://apis.google.com"
style_src: "'self' 'unsafe-inline'"
worker_src: "'self' blob:"
report_uri:
# Trusted Proxies
# Customize if you have GitLab behind a reverse proxy which is running on a different machine.
# Add the IP address for your reverse proxy to the list, otherwise users will appear signed in from that address.
trusted_proxies:
{% for proxy in cfg("nginx_real_ip_trusted_addresses").split() %}
- {{ proxy }}
{% endfor %}
# Uncomment and customize if you can't use the default user to run GitLab (default: 'git')
user: {{ backend_info.user }}
## Date & Time settings
time_zone: '{{ cfg("time_zone") }}'
## Email settings
# Uncomment and set to false if you need to disable email sending from GitLab (default: true)
email_enabled: {{ cfg('email_enabled') }}
# Email address used in the "From" field in mails sent by GitLab
email_from: {{ cfg('email_from') }}
email_display_name: {{ cfg('email_display_name') }}
email_reply_to: {{ cfg('email_reply_to') }}
email_subject_suffix: ''
# Email server smtp settings are in [a separate file](initializers/smtp_settings.rb.sample).
## User settings
default_can_create_group: {{ cfg('default_can_create_group') }} # default: true
username_changing_enabled: {{ cfg('username_changing_enabled') }} # default: true - User can change her username/namespace
## Default theme
## 1 - Graphite
## 2 - Charcoal
## 3 - Green
## 4 - Gray
## 5 - Violet
## 6 - Blue
default_theme: {{ cfg('default_theme') }} # default: 2
{# for now we are ok with default issue-closing pattern
## Automatic issue closing
# If a commit message matches this regular expression, all issues referenced from the matched text will be closed.
# This happens when the commit is pushed or merged into the default branch of a project.
# When not specified the default issue_closing_pattern as specified below will be used.
# Tip: you can test your closing pattern at http://rubular.com
issue_closing_pattern: <%= single_quote(@gitlab_issue_closing_pattern) %>
#}
## Default project features settings
default_projects_features:
issues: {{ cfg('default_projects_features.issues') }}
merge_requests: {{ cfg('default_projects_features.merge_requests') }}
wiki: {{ cfg('default_projects_features.wiki') }}
snippets: {{ cfg('default_projects_features.snippets') }}
builds: {{ cfg('default_projects_features.builds') }}
{# container_registry: <%= @gitlab_default_projects_features_container_registry %> #}
## Webhook settings
# Number of seconds to wait for HTTP response after sending webhook HTTP POST request (default: 10)
webhook_timeout: {{ cfg('webhook_timeout') }}
{# default is just ok
## Repository downloads directory
# When a user clicks e.g. 'Download zip' on a project, a temporary zip file is created in the following directory.
# The default is 'shared/cache/archive/' relative to the root of the Rails app.
# repository_downloads_path: shared/cache/archive/
repository_downloads_path: <%= @gitlab_repository_downloads_path %>
#}
{# we do not support reply by email
## Reply by email
# Allow users to comment on issues and merge requests by replying to notification emails.
# For documentation on how to set this up, see http://doc.gitlab.com/ce/administration/reply_by_email.html
incoming_email:
enabled: <%= @incoming_email_enabled %>
# The email address including the `%{key}` placeholder that will be replaced to reference the item being replied to.
# The placeholder can be omitted but if present, it must appear in the "user" part of the address (before the `@`).
address: <%= single_quote(@incoming_email_address) %>
# Email account username
# With third party providers, this is usually the full email address.
# With self-hosted email servers, this is usually the user part of the email address.
user: <%= single_quote(@incoming_email_email) %>
# Email account password
password: <%= single_quote(@incoming_email_password) %>
# IMAP server host
host: <%= single_quote(@incoming_email_host) %>
# IMAP server port
port: <%= @incoming_email_port %>
# Whether the IMAP server uses SSL
ssl: <%= @incoming_email_ssl %>
# Whether the IMAP server uses StartTLS
start_tls: <%= @incoming_email_start_tls %>
# The mailbox where incoming mail will end up. Usually "inbox".
mailbox: <%= single_quote(@incoming_email_mailbox_name) %>
# The IDLE command timeout.
idle_timeout: 60
#}
{# we do not support build artifacts
## Build Artifacts
artifacts:
enabled: <%= @artifacts_enabled %>
# The location where Build Artifacts are stored (default: shared/artifacts).
path: <%= @artifacts_path %>
#}
{# we do not support LFS
## Git LFS
lfs:
enabled: <%= @lfs_enabled %>
# The location where LFS objects are stored (default: shared/lfs-objects).
storage_path: <%= @lfs_storage_path %>
#}
{# we do not support container registry
## Container Registry
registry:
enabled: <%= @registry_enabled %>
host: <%= @registry_host %>
port: <%= @registry_port %>
api_url: <%= @registry_api_url %> # internal address to the registry, will be used by GitLab to directly communicate with API
path: <%= @registry_path %>
key: <%= @registry_key_path %>
issuer: <%= @registry_issuer %>
#}
{# we do not support Pages
## GitLab Pages (EE only)
pages:
enabled: <%= @pages_enabled %>
path: <%= @pages_path %>
host: <%= @pages_host %>
port: <%= @pages_port %>
https: <%= @pages_https %>
external_http: <%= @pages_external_http %>
external_https: <%= @pages_external_https %>
#}
{# we do not support Elasticsearch
## Elasticsearch (EE only)
# Enable it if you are going to use elasticsearch instead of
# regular database search
elasticsearch:
enabled: <%= @elasticsearch_enabled %>
host: <%= @elasticsearch_host %>
port: <%= @elasticsearch_port %>
#}
## Gravatar
## For Libravatar see: http://doc.gitlab.com/ce/customization/libravatar.html
gravatar:
{# default is just ok
# gravatar urls: possible placeholders: %{hash} %{size} %{email}
plain_url: <%= single_quote(@gravatar_plain_url) %> # default: http://www.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon
ssl_url: <%= single_quote(@gravatar_ssl_url) %> # default: https://secure.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon
#}
## Sidekiq
sidekiq:
log_format: json # (default is the original format)
{# XXX cron jobs are disabled for now - we do not support CI and EE features or we are ok with defaults
## Auxiliary jobs
# Periodically executed jobs, to self-heal GitLab, do external synchronizations, etc.
# Please read here for more information: https://github.com/ondrejbartas/sidekiq-cron#adding-cron-job
cron_jobs:
# Flag stuck CI builds as failed
stuck_ci_builds_worker:
cron: <%= @stuck_ci_builds_worker_cron %>
# Remove outdated repository archives
repository_archive_cache_worker:
cron: <%= @repository_archive_cache_worker_cron %>
##
# GitLab EE only jobs:
# Snapshot active users statistics
historical_data_worker:
cron: <%= @historical_data_worker_cron %>
# Update mirrored repositories
update_all_mirrors_worker:
cron: <%= @update_all_mirrors_worker_cron %>
# Update remote mirrors
update_all_remote_mirrors_worker:
cron: <%= @update_all_remote_mirrors_worker_cron %>
# In addition to refreshing users when they log in,
# periodically refresh LDAP users membership.
# NOTE: This will only take effect if LDAP is enabled
ldap_sync_worker:
cron: <%= @ldap_sync_worker_cron %>
# Gitlab Geo nodes notification worker
# NOTE: This will only take effect if Geo is enabled
geo_bulk_notify_worker:
cron: <%= @geo_bulk_notify_worker_cron %>
#}
#
# 2. GitLab CI settings
# ==========================
{# we do not support CI
gitlab_ci:
# Default project notifications settings:
#
# Send emails only on broken builds (default: true)
all_broken_builds: <%= @gitlab_ci_all_broken_builds %>
#
# Add pusher to recipients list (default: false)
add_pusher: <%= @gitlab_ci_add_pusher || @gitlab_ci_add_committer %>
# The location where build traces are stored (default: builds/). Relative paths are relative to Rails.root
builds_path: <%= @builds_directory %>
#}
#
# 3. Auth settings
# ==========================
## LDAP settings
# You can inspect a sample of the LDAP users with login access by running:
# bundle exec rake gitlab:ldap:check RAILS_ENV=production
ldap:
enabled: false
{# just disabled
enabled: <%= @ldap_enabled %>
sync_time: <%= @ldap_sync_time %>
<% if @ldap_servers.any? %>
servers:
<% @ldap_servers.each do |provider_id, settings| %>
<%= provider_id %>: <%= settings.to_json %>
<% end %>
<% else %>
host: <%= single_quote(@ldap_host) %>
port: <%= @ldap_port %>
uid: <%= single_quote(@ldap_uid) %>
method: <%= single_quote(@ldap_method) %> # "tls" or "ssl" or "plain"
bind_dn: <%= single_quote(@ldap_bind_dn) %>
password: <%= single_quote(@ldap_password) %>
active_directory: <%= @ldap_active_directory %>
allow_username_or_email_login: <%= @ldap_allow_username_or_email_login %>
base: <%= single_quote(@ldap_base) %>
user_filter: <%= single_quote(@ldap_user_filter) %>
## EE only
group_base: <%= single_quote(@ldap_group_base) %>
admin_group: <%= single_quote(@ldap_admin_group) %>
sync_ssh_keys: <%= single_quote(@ldap_sync_ssh_keys) %>
sync_time: <%= @ldap_sync_time %>
<% end %>
#}
## Kerberos settings
kerberos:
enabled: false
{# just disabled
# Allow the HTTP Negotiate authentication method for Git clients
enabled: <%= @kerberos_enabled %>
# Kerberos 5 keytab file. The keytab file must be readable by the GitLab user,
# and should be different from other keytabs in the system.
# (default: use default keytab from Krb5 config)
keytab: <%= @kerberos_keytab %>
# The Kerberos service name to be used by GitLab.
# (default: accept any service name in keytab file)
service_principal_name: <%= @kerberos_service_principal_name %>
# Dedicated port: Git before 2.4 does not fall back to Basic authentication if Negotiate fails.
# To support both Basic and Negotiate methods with older versions of Git, configure
# nginx to proxy GitLab on an extra port (e.g. 8443) and uncomment the following lines
# to dedicate this port to Kerberos authentication. (default: false)
use_dedicated_port: <%= @kerberos_use_dedicated_port %>
port: <%= @kerberos_port %>
https: <%= @kerberos_https %>
#}
## OmniAuth settings
omniauth:
enabled: false
{# just disabled
# Allow login via Twitter, Google, etc. using OmniAuth providers
enabled: <%= @omniauth_enabled %>
# Uncomment this to automatically sign in with a specific omniauth provider's without
# showing GitLab's sign-in page (default: show the GitLab sign-in page)
auto_sign_in_with_provider: <%= @omniauth_auto_sign_in_with_provider %>
# CAUTION!
# This allows users to login without having a user account first. Define the allowed
# providers using an array, e.g. ["saml", "twitter"]
# User accounts will be created automatically when authentication was successful.
allow_single_sign_on: <%= @omniauth_allow_single_sign_on.to_json %>
# Locks down those users until they have been cleared by the admin (default: true).
block_auto_created_users: <%= @omniauth_block_auto_created_users %>
# Look up new users in LDAP servers. If a match is found (same uid), automatically
# link the omniauth identity with the LDAP account. (default: false)
auto_link_ldap_user: <%= @omniauth_auto_link_ldap_user %>
# Allow users with existing accounts to login and auto link their account via SAML
# login, without having to do a manual login first and manually add SAML
# (default: false)
auto_link_saml_user: <%= @omniauth_auto_link_saml_user.to_json %>
# Set different Omniauth providers as external so that all users creating accounts
# via these providers will not be able to have access to internal projects. You
# will need to use the full name of the provider, like `google_oauth2` for Google.
# Refer to the examples below for the full names of the supported providers.
# (default: [])
external_providers: <%= @omniauth_external_providers.to_json %>
## Auth providers
# Uncomment the following lines and fill in the data of the auth provider you want to use
# If your favorite auth provider is not listed you can use others:
# see https://github.com/gitlabhq/gitlab-public-wiki/wiki/Custom-omniauth-provider-configurations
# The 'app_id' and 'app_secret' parameters are always passed as the first two
# arguments, followed by optional 'args' which can be either a hash or an array.
# Documentation for this is available at http://doc.gitlab.com/ce/integration/omniauth.html
providers:
# - { name: 'google_oauth2', app_id: 'YOUR APP ID',
# app_secret: 'YOUR APP SECRET',
# args: { access_type: 'offline', approval_prompt: '' } }
# - { name: 'twitter', app_id: 'YOUR APP ID',
# app_secret: 'YOUR APP SECRET'}
# - { name: 'github', app_id: 'YOUR APP ID',
# app_secret: 'YOUR APP SECRET',
# args: { scope: 'user:email' } }
<% @omniauth_providers.each do |provider| %>
- <%= provider.to_json %>
<% end %>
#}
{# default ($RAILS_ROOT/shared/) is ok - we symlinked it to proper place
# Shared file storage settings
shared:
path: <%= @shared_path %>
#}
# Gitaly settings
gitaly:
# Default Gitaly authentication token. Can be overridden per storage. Can
# be left blank when Gitaly is running locally on a Unix socket, which
# is the normal way to deploy Gitaly.
token:
#
# 4. Advanced settings
# ==========================
## Repositories settings
repositories:
# Paths where repositories can be stored. Give the canonicalized absolute pathname.
# IMPORTANT: None of the path components may be a symlink, because
# gitlab-shell invokes Dir.pwd inside the repository path and that returns
# the real path, not the symlink.
storages: # You must have at least a `default` storage path.
default:
path: {{ gitlab.repositories }}
gitaly_address: unix:{{ gitaly.socket }} # TCP connections are supported too (e.g. tcp://host:port). TLS connections are also supported using the system certificate pool (eg: tls://host:port).
# gitaly_token: 'special token' # Optional: override global gitaly.token for this storage.
## Backup settings
backup:
path: "{{ gitlab.backup }}" # Relative paths are relative to Rails.root (default: tmp/backups/)
{# default permission is ok
archive_permissions: <%= @backup_archive_permissions %> # Permissions for the resulting backup.tar file (default: 0600)
#}
keep_time: {{ cfg('backup_keep_time') }} # default: 0 (forever) (in seconds)
{# default to backup all schemas is just ok
pg_schema: <%= @backup_pg_schema %> # default: nil, it means that all schemas will be backed up
#}
upload:
{# we don't want to upload backup anywhere by gitlab builtin mechanisms
# Fog storage connection settings, see http://fog.io/storage/ .
connection: <%= @backup_upload_connection.to_json if @backup_upload_connection %>
# The remote 'directory' to store your backups. For S3, this would be the bucket name.
remote_directory: <%= single_quote(@backup_upload_remote_directory) %>
multipart_chunk_size: <%= @backup_multipart_chunk_size %>
encryption: <%= @backup_encryption %>
#}
## GitLab Shell settings
gitlab_shell:
path: {{ gitlab_shell_work.location }}
authorized_keys_file: {{ gitlab.var }}/sshkeys-notused
repos_path: {{ gitlab.repositories }}
hooks_path: {{ gitlab_shell_work.location }}/hooks/
secret_file: {{ gitlab_shell.secret }}
# Git over HTTP
upload_pack: true
receive_pack: true
# Git import/fetch timeout, in seconds. Defaults to 3 hours.
# git_timeout: 10800
{# Git over SSH is disabled elsewhere (so we don't care about ssh_port)
# If you use non-standard ssh port you need to specify it
ssh_port: <%= @gitlab_shell_ssh_port %>
#}
# git-annex support (EE only)
# If this setting is set to true, the same setting in config.yml of
# gitlab-shell needs to be set to true
git_annex_enabled: <%= @git_annex_enabled %>
## Git settings
# CAUTION!
# Use the default values unless you really know what you are doing
git:
bin_path: {{ git }}
# The next value is the maximum memory size grit can use
# Given in number of bytes per git object (e.g. a commit)
# This value can be increased if you have very large commits
max_size: {{ cfg('git_max_size') }}
# Git timeout to read a commit, in seconds
timeout: {{ cfg('git_timeout') }}
#
# 5. Extra customization
# ==========================
extra:
{# we do not use google analytics
<% if @extra_google_analytics_id %>
## Google analytics. Uncomment if you want it
google_analytics_id: <%= single_quote(@extra_google_analytics_id) %>
<% end %>
#}
{# we do not use piwik
<% if @extra_piwik_url %>
## Piwik analytics.
piwik_url: <%= single_quote(@extra_piwik_url) %>
piwik_site_id: <%= single_quote(@extra_piwik_site_id) %>
<% end %>
#}
{# we are ok (for now) with default rack-attack git settings
rack_attack:
git_basic_auth: <%= @rack_attack_git_basic_auth.to_json if @rack_attack_git_basic_auth %>
#}
## Site ICP License
# XXX unquote needed only for slapos.core earlier than
# https://lab.nexedi.com/nexedi/slapos.core/commit/347d33d6
# for now we have a lot of old slapos.core deployed...
{% if cfg('icp_license') != '' -%}
ICP: {{ urllib.unquote_plus( str(cfg('icp_license')) ).decode('utf-8') }}
{# ICP: '{{ cfg("icp_license") }}' #}
{% endif %}
development:
<<: *base
test:
<<: *base
gravatar:
enabled: true
gitlab:
host: localhost
port: 80
# When you run tests we clone and setup gitlab-shell
# In order to setup it correctly you need to specify
# your system username you use to run GitLab
# user: YOUR_USERNAME
satellites:
path: tmp/tests/gitlab-satellites/
repositories:
storages:
default: tmp/tests/repositories/
gitlab_shell:
path: tmp/tests/gitlab-shell/
repos_path: tmp/tests/repositories/
hooks_path: tmp/tests/gitlab-shell/hooks/
issues_tracker:
redmine:
title: "Redmine"
project_url: "http://redmine/projects/:issues_tracker_id"
issues_url: "http://redmine/:project_id/:issues_tracker_id/:id"
new_issue_url: "http://redmine/projects/:issues_tracker_id/issues/new"
ldap:
enabled: false
servers:
main:
label: ldap
host: 127.0.0.1
port: 3890
uid: 'uid'
method: 'plain' # "tls" or "ssl" or "plain"
base: 'dc=example,dc=com'
user_filter: ''
group_base: 'ou=groups,dc=example,dc=com'
admin_group: ''
sync_ssh_keys: false
staging:
<<: *base
nginx-gitlab-http.conf.in 0000664 0000000 0000000 00000020335 14241130220 0033374 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/nginx-gitlab-http.conf.erb
# (last updated for omnibus-gitlab 8.7.9+ce.1-0-gf589ad7)
{% from 'macrolib.cfg.in' import cfg, cfg_bool, cfg_https, fqdn with context %}
## GitLab
## Modified from https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/support/nginx/gitlab-ssl & https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/support/nginx/gitlab
##
## Lines starting with two hashes (##) are comments with information.
## Lines starting with one hash (#) are configuration parameters that can be uncommented.
##
##################################
## CHUNKED TRANSFER ##
##################################
##
## It is a known issue that Git-over-HTTP requires chunked transfer encoding [0]
## which is not supported by Nginx < 1.3.9 [1]. As a result, pushing a large object
## with Git (i.e. a single large file) can lead to a 411 error. In theory you can get
## around this by tweaking this configuration file and either:
## - installing an old version of Nginx with the chunkin module [2] compiled in, or
## - using a newer version of Nginx.
##
## At the time of writing we do not know if either of these theoretical solutions works.
## As a workaround users can use Git over SSH to push large files.
##
## [0] https://git.kernel.org/cgit/git/git.git/tree/Documentation/technical/http-protocol.txt#n99
## [1] https://github.com/agentzh/chunkin-nginx-module#status
## [2] https://github.com/agentzh/chunkin-nginx-module
##
###################################
## configuration ##
###################################
upstream gitlab-workhorse {
server unix:{{ gitlab_workhorse.socket }};
}
{# not needed for us - the frontend can do the redirection and also
gitlab/nginx speaks HSTS on https port so when we access https port via http
protocol, it gets redirected to https
<% if @https && @redirect_http_to_https %>
## Redirects all HTTP traffic to the HTTPS host
server {
<% @listen_addresses.each do |listen_address| %>
listen <%= listen_address %>:<%= @redirect_http_to_https_port %>;
<% end %>
server_name <%= @fqdn %>;
server_tokens off; ## Don't show the nginx version number, a security best practice
return 301 https://<%= @fqdn %>:<%= @port %>$request_uri;
access_log <%= @log_directory %>/gitlab_access.log gitlab_access;
error_log <%= @log_directory %>/gitlab_error.log;
}
<% end %>
#}
server {
listen [{{ backend_info.host }}]:{{ backend_info.port }}{% if cfg_https %} ssl http2{% endif %};
{# we don't use: kerbeeros
<% if @kerberos_enabled && @kerberos_use_dedicated_port %>
listen <%= listen_address %>:<%= @kerberos_port %><% if @kerberos_https %> ssl<% end %>;
<% end %>
#}
server_name {{ fqdn }};
server_tokens off; ## Don't show the nginx version number, a security best practice
## Increase this if you want to upload large attachments
## Or if you want to accept large git objects over http
client_max_body_size {{ cfg('nginx_client_max_body_size') }};
{% if cfg_https %}
## Strong SSL Security
## https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html & https://cipherli.st/
ssl on;
ssl_certificate {{ nginx.cert_file }};
ssl_certificate_key {{ nginx.key_file }};
{# we don't need - most root CA will be included by default
<% if @ssl_client_certificate %>
ssl_client_certificate <%= @ssl_client_certificate%>;
<% end %>
#}
# GitLab needs backwards compatible ciphers to retain compatibility with Java IDEs
# NOTE(slapos) ^^^ is not relevant for us - we are behind frontend and clients
# directly connects to frontend
ssl_ciphers '{{ cfg("nginx_ssl_ciphers") }}';
ssl_protocols {{ cfg('nginx_ssl_protocols') }};
ssl_prefer_server_ciphers {{ cfg('nginx_ssl_prefer_server_ciphers') }};
ssl_session_cache {{ cfg('nginx_ssl_session_cache') }};
ssl_session_timeout {{ cfg('nginx_ssl_session_timeout') }};
{# we do not use: ssl_dhparam
<% if @ssl_dhparam %>
ssl_dhparam <%= @ssl_dhparam %>;
<% end %>
#}
{% endif %}
## Real IP Module Config
## http://nginx.org/en/docs/http/ngx_http_realip_module.html
{% if '{{ cfg("nginx_real_ip_header") }}' %}
real_ip_header '{{ cfg("nginx_real_ip_header") }}';
{% endif %}
{% if '{{ cfg("nginx_real_ip_recursive") }}' %}
real_ip_recursive '{{ cfg("nginx_real_ip_recursive") }}';
{% endif %}
{% for trusted_address in cfg("nginx_real_ip_trusted_addresses").split() %}
set_real_ip_from {{ trusted_address }};
{% endfor %}
## HSTS Config
## https://www.nginx.com/blog/http-strict-transport-security-hsts-and-nginx/
{% if cfg("nginx_hsts_max_age") > 0 -%}
{% if '{{ cfg("nginx_hsts_include_subdomains") }}' == 'true' -%}
add_header Strict-Transport-Security "max-age={{ cfg('nginx_hsts_max_age') }}; includeSubDomains"
{% else -%}
add_header Strict-Transport-Security "max-age={{ cfg('nginx_hsts_max_age') }}";
{% endif -%}
{% endif -%}
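## (illustration) with nginx_hsts_max_age = 31536000 and
## nginx_hsts_include_subdomains = true, responses would carry:
##   Strict-Transport-Security: max-age=31536000; includeSubDomains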
## Individual nginx logs for this GitLab vhost
access_log {{ nginx.log }}/gitlab_access.log gitlab_access;
error_log {{ nginx.log }}/gitlab_error.log;
# Set CORS header
add_header 'Access-Control-Allow-Origin' {{ cfg('nginx_header_allow_origin') }};
add_header 'Access-Control-Allow-Credentials' true;
#{{ 'gzip off;' if cfg_https else ''}}
{% if '{{ cfg("nginx_gzip_enabled") }}' == 'true' -%}
gzip on;
gzip_static on;
gzip_comp_level 2;
gzip_http_version 1.1;
gzip_vary on;
gzip_disable "msie6";
gzip_min_length 10240;
gzip_proxied no-cache no-store private expired auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/json application/xml application/rss+xml;
{% endif -%}
## https://github.com/gitlabhq/gitlabhq/issues/694
## Some requests take more than 30 seconds.
proxy_read_timeout {{ cfg('nginx_proxy_read_timeout') }};
proxy_connect_timeout {{ cfg('nginx_proxy_connect_timeout') }};
proxy_redirect off;
proxy_http_version 1.1;
{# we do not support relative URL - path is always "/" #}
{% set path = "/" %}
#if ($http_host = "") {
# set $http_host_with_default "<%= default_host %>";
#}
#if ($http_host != "") {
# set $http_host_with_default $http_host;
#}
location ~ (\.git/gitlab-lfs/objects|\.git/info/lfs/objects/batch$) {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
{% if cfg_https %}
proxy_set_header X-Forwarded-Ssl on;
{% endif %}
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto {{ "https" if cfg_https else "http" }};
proxy_pass http://gitlab-workhorse;
}
location {{ path }} {
# NOTE(slapos) proxy headers are defined upstream in omnibus-gitlab in:
# - files/gitlab-config-template/gitlab.rb.template nginx['proxy_set_headers']
# - files/gitlab-cookbooks/gitlab/attributes/default.rb default['gitlab']['nginx']['proxy_set_headers']
# - files/gitlab-cookbooks/gitlab/libraries/gitlab.rb parse_nginx_proxy_headers()
# (last updated for omnibus-gitlab 8.5.1+ce.0-1-ge732b39)
if ($request_method = OPTIONS ) {
add_header Allow "GET, OPTIONS";
add_header Content-Type text/plain;
add_header 'Access-Control-Allow-Origin' $http_origin;
add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Authorization, Content-Type, Accept";
return 200;
}
proxy_cache off;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
{% if cfg_https %}
proxy_set_header X-Forwarded-Ssl on;
{% endif %}
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto {{ "https" if cfg_https else "http" }};
proxy_pass http://gitlab-workhorse;
}
location ~ ^/(assets)/ {
proxy_cache off;
proxy_pass http://gitlab-workhorse;
}
error_page 404 /404.html;
error_page 422 /422.html;
error_page 500 /500.html;
error_page 502 /502.html;
location ~ ^/(404|422|500|502)\.html$ {
root {{ gitlab_work.location }}/public;
internal;
}
{# we don't support custom nginx configs
<%= @custom_gitlab_server_config %>
#}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/nginx.conf.in 0000664 0000000 0000000 00000006560 14241130220 0031242 0 ustar 00root root 0000000 0000000 {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/support/nginx/gitlab-ssl
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/nginx.conf.erb
# (last updated for omnibus-gitlab 8.8.9+ce.0-g25376053)
{% from 'macrolib.cfg.in' import cfg with context %}
# user directive makes sense only when running initially as root
# (and nginx will complain if not and directive given)
# user {{ backend_info.user }};
worker_processes {{ cfg('nginx_worker_processes') }};
error_log stderr;
pid {{ directory.run }}/nginx.pid;
daemon off;
events {
worker_connections {{ cfg('nginx_worker_connections') }};
}
http {
log_format gitlab_access '{{ cfg("nginx_log_format") }}';
{# we do not use: ci, mattermost
log_format gitlab_ci_access '<%= @gitlab_ci_access_log_format %>';
log_format gitlab_mattermost_access '<%= @gitlab_mattermost_access_log_format %>';
#}
sendfile {{ cfg('nginx_sendfile') }};
tcp_nopush {{ cfg('nginx_tcp_nopush') }};
tcp_nodelay {{ cfg('nginx_tcp_nodelay') }};
keepalive_timeout {{ cfg('nginx_keepalive_timeout') }};
gzip {{ cfg('nginx_gzip') }};
gzip_http_version {{ cfg('nginx_gzip_http_version') }};
gzip_comp_level {{ cfg('nginx_gzip_comp_level') }};
gzip_proxied {{ cfg('nginx_gzip_proxied') }};
gzip_types {{ cfg('nginx_gzip_types') }};
include {{ nginx_mime_types }};
{# we do not do nginx caching:
- gitlab-workhorse serves raw blobs fast
- we have caches on frontend nodes
proxy_cache_path <%= @proxy_cache_path %>;
proxy_cache <%= @proxy_cache %>;
#}
include {{ nginx_gitlab_http_conf }};
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Remove private_token from the request URI
# In: /foo?private_token=unfiltered&authenticity_token=unfiltered&rss_token=unfiltered&...
# Out: /foo?private_token=[FILTERED]&authenticity_token=unfiltered&rss_token=unfiltered&...
map $request_uri $temp_request_uri_1 {
default $request_uri;
~(?i)^(?.*)(?[\?&]private[\-_]token)=[^&]*(?.*)$ "$start$temp=[FILTERED]$rest";
}
# Remove authenticity_token from the request URI
# In: /foo?private_token=[FILTERED]&authenticity_token=unfiltered&rss_token=unfiltered&...
# Out: /foo?private_token=[FILTERED]&authenticity_token=[FILTERED]&rss_token=unfiltered&...
map $temp_request_uri_1 $temp_request_uri_2 {
default $temp_request_uri_1;
~(?i)^(?.*)(?[\?&]authenticity[\-_]token)=[^&]*(?.*)$ "$start$temp=[FILTERED]$rest";
}
# Remove rss_token from the request URI
# In: /foo?private_token=[FILTERED]&authenticity_token=[FILTERED]&rss_token=unfiltered&...
# Out: /foo?private_token=[FILTERED]&authenticity_token=[FILTERED]&rss_token=[FILTERED]&...
map $temp_request_uri_2 $filtered_request_uri {
default $temp_request_uri_2;
~(?i)^(?.*)(?[\?&]rss[\-_]token)=[^&]*(?.*)$ "$start$temp=[FILTERED]$rest";
}
# A version of the referer without the query string
map $http_referer $filtered_http_referer {
default $http_referer;
~^(?<temp>.*)\? $temp;
}
{# we don't need: ci, pages, mattermost, registry
include <%= @gitlab_ci_http_config %>
include <%= @gitlab_pages_http_config %>;
include <%= @gitlab_mattermost_http_config %>
include <%= @gitlab_registry_http_config %>;
#}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/rack_attack.rb.in 0000664 0000000 0000000 00000003026 14241130220 0032036 0 ustar 00root root 0000000 0000000 {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/initializers/rack_attack.rb.example
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/rack_attack.rb.erb
# (last updated for omnibus-gitlab 8.8.9+ce.0-g25376053)
{% from 'macrolib.cfg.in' import cfg with context %}
# 1. Rename this file to rack_attack.rb
# 2. Review the paths_to_be_protected and add any other path you need protecting
#
paths_to_be_protected = [
"#{Rails.application.config.relative_url_root}/users/password",
"#{Rails.application.config.relative_url_root}/users/sign_in",
"#{Rails.application.config.relative_url_root}/api/#{API::API.version}/session.json",
"#{Rails.application.config.relative_url_root}/api/#{API::API.version}/session",
"#{Rails.application.config.relative_url_root}/users",
"#{Rails.application.config.relative_url_root}/users/confirmation",
"#{Rails.application.config.relative_url_root}/unsubscribes/"
]
# Create one big regular expression that matches strings starting with any of
# the paths_to_be_protected.
paths_regex = Regexp.union(paths_to_be_protected.map { |path| /\A#{Regexp.escape(path)}/ })
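# For instance (illustration), with an empty relative_url_root:
#   "/users/sign_in?foo=1" =~ paths_regex  # => 0   (anchored prefix match)
#   "/projects/users"      =~ paths_regex  # => nil (not a protected prefix)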
rack_attack_enabled = Gitlab.config.rack_attack.git_basic_auth['enabled']
unless Rails.env.test? || !rack_attack_enabled
Rack::Attack.throttle('protected paths', limit: {{ cfg('rate_limit_requests_per_period') }}, period: {{ cfg('rate_limit_period') }}.seconds) do |req|
if req.post? && req.path =~ paths_regex
req.ip
end
end
end
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/resque.yml.in 0000664 0000000 0000000 00000000513 14241130220 0031267 0 ustar 00root root 0000000 0000000 {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/resque.yml.example
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/resque.yml.erb
# (last updated for omnibus-gitlab 8.7.9+ce.1-0-gf589ad7)
production: unix://{{ redis.unixsocket }}
smtp_settings.rb.in 0000664 0000000 0000000 00000002411 14241130220 0032410 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/initializers/smtp_settings.rb.sample
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/smtp_settings.rb.erb
# (last updated for omnibus-gitlab 8.8.9+ce.0-g25376053)
{% from 'macrolib.cfg.in' import cfg, cfg_bool with context %}
{% if cfg_bool('smtp_enable') %}
if Rails.env.production?
Gitlab::Application.config.action_mailer.delivery_method = :smtp
ActionMailer::Base.delivery_method = :smtp
ActionMailer::Base.smtp_settings = {
address: "{{ cfg('smtp_address') }}",
port: {{ cfg('smtp_port') }},
user_name: "{{ cfg('smtp_user_name') }}",
password: "{{ cfg('smtp_password') }}",
domain: "{{ cfg('smtp_domain') }}",
authentication: :{{ cfg('smtp_authentication') }},
enable_starttls_auto: {{ cfg('smtp_enable_starttls_auto') }},
# ssl:
openssl_verify_mode: '{{ cfg("smtp_openssl_verify_mode") }}'
# ca_path:
# ca_file:
}
end
{% else %}
# SMTP disabled in instance configuration (see `smtp_enable` parameter).
# Mail sending, if enabled (see `email_enabled`), will be done via sendmail.
{% endif %}
template-gitlab-resiliency-restore.sh.in 0000664 0000000 0000000 00000006460 14241130220 0036424 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template #!{{ bash_bin }}
# DO NOT RUN THIS SCRIPT ON PRODUCTION INSTANCE
# DATA WILL BE ERASED
set -e
echo "###################################################################################"
echo "# #"
echo "# Warning: DO NOT RUN THIS SCRIPT ON PRODUCTION INSTANCE DaTA WILL BE ERASED !!! #"
echo "# #"
echo "###################################################################################"
echo -e "\nWill start in 10 seconds, cancel execution if you didn't want to run this script."
sleep 10
postgres_executable="{{ postgress_script }}"
redis_executable="{{ redis_script }}"
git_backup_directory="{{ gitlab_backup_dir }}"
redis_pid_file="{{ redis_pid_file }}"
postgres_pid_file="{{ postgres_pid_file }}"
bin_location="{{ bin_directory }}"
run_location="{{ run_directory }}"
git_location="{{ git_location }}"
go_work_bin="{{ go_work_bin }}"
etc_location="{{ etc_directory }}"
gitlab_work="{{ gitlab_work_location }}"
promise_check="{{ promise_lab_location }}"
unicorn_script="{{ unicorn_script }}"
sidekiq_script="{{ sidekiq_script }}"
# export GIT_EXEC_PATH=$git_location/libexec/git-core/
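# check_process <pid_file> <name>: abort the restore if <pid_file> points to a
# process that is still alive.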
check_process () {
pid_file=$1
pname=$2
if [ -e "$pid_file" ]; then
pid=$(head -n 1 "$pid_file" 2> /dev/null)
if kill -0 "$pid" 2> /dev/null; then
echo "$pname is already running with pid $pid. Aborting."
exit 1
fi
fi
}
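# kill_process <pid>: send SIGTERM, but only if the process is still alive.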
kill_process () {
pid=$1
R=0
kill -0 "$pid" > /dev/null 2>&1 || R=$?
if [ $R -eq 0 ]; then
kill -TERM $pid
fi
}
check_process $postgres_pid_file "Postgres"
check_process $redis_pid_file "Redis"
check_process $run_location/unicorn.pid "Unicorn"
if [ -f "$postgres_pid_file" ]; then
rm $postgres_pid_file
fi
echo "Starting Postgres..."
$postgres_executable &
postgres_pid=$!
trap "echo 'kill $postgres_pid" EXIT TERM INT
echo "Starting Redis server..."
$redis_executable &
redis_pid=$!
trap "kill $postgres_pid $redis_pid" EXIT TERM INT
echo "[OK]"
echo "Restoring gitlab data..."
# XXX - workaround until this problem is fixed on runner1
sed -ie "s/connection.execute('TRUNCATE schema_migrations')\s*$/connection.execute('TRUNCATE schema_migrations') if connection.table_exists? 'schema_migrations'/g" $gitlab_work/lib/tasks/gitlab/db.rake
cd $git_backup_directory
PATH=$bin_location:$go_work_bin:$git_location/bin:$PATH gitlab-backup restore -vupok -go HEAD
echo "Checking gitlab promises..."
echo "[info] Not all promises are checked!"
$promise_check/gitlab-app
echo "Starting Unicorn to check gitlab-shell promise..."
$unicorn_script &
unicorn_pid=$!
trap "kill $postgres_pid $redis_pid $unicorn_pid" EXIT TERM INT
sleep 60
if [ -s "$run_location/unicorn.pid" ]; then
unicorn_ppid=$(head -n 1 $run_location/unicorn.pid 2> /dev/null)
trap "kill $postgres_pid $redis_pid $unicorn_ppid" EXIT TERM INT
fi
$promise_check/gitlab-shell
#echo "starting Sidekiq to check sidekiq promise..."
#$sidekiq_script &
#sidekiq_pid=$!
#trap "kill $sidekiq_pid" EXIT TERM INT
#$promise_check/sidekiq
kill_process $postgres_pid
kill_process $redis_pid
kill_process $unicorn_pid
RESTORE_EXIT_CODE=$?
if [ $RESTORE_EXIT_CODE -eq 0 ]; then
echo 'Backup restoration successfully completed.'
else
echo 'Backup restoration failed.'
fi
exit $RESTORE_EXIT_CODE
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/template/unicorn.rb.in 0000664 0000000 0000000 00000011201 14241130220 0031236 0 ustar 00root root 0000000 0000000 {{ autogenerated }}
# see:
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/unicorn.rb.example
# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/unicorn.rb.example.development
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/templates/default/unicorn.rb.erb
# (last updated for omnibus-gitlab 8.7.9+ce.1-0-gf589ad7)
{% from 'macrolib.cfg.in' import cfg with context %}
# What ports/sockets to listen on, and what options for them.
# we listen only on unix socket
listen "{{ unicorn.socket }}", :backlog => {{ cfg('unicorn_backlog_socket') }}
#listen "127.0.0.1:8888", :tcp_nopush => true
working_directory '{{ gitlab_work.location }}'
# What the timeout for killing busy workers is, in seconds
timeout {{ cfg('unicorn_worker_timeout') }}
# combine Ruby 2.0.0dev or REE with "preload_app true" for memory savings
# http://rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
preload_app true
GC.respond_to?(:copy_on_write_friendly=) and
GC.copy_on_write_friendly = true
# Enable this flag to have unicorn test client connections by writing the
# beginning of the HTTP headers before calling the application. This
# prevents calling the application for connections that have disconnected
# while queued. This is only guaranteed to detect clients on the same
# host unicorn runs on, and unlikely to detect disconnects even on a
# fast LAN.
check_client_connection false
# How many worker processes
worker_processes {{ cfg('unicorn_worker_processes') }}
# about before_fork / after_fork - see:
# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/definitions/unicorn_service.rb
# http://bogomips.org/unicorn.git/tree/examples/unicorn.conf.rb?id=3312aca8#n75
# What to do before we fork a worker
before_fork do |server, worker|
# XXX why does gitlab not enable this?
# # the following is highly recommended for Rails + "preload_app true"
# # as there's no need for the master process to hold a connection
# defined?(ActiveRecord::Base) and
# ActiveRecord::Base.connection.disconnect!
# The following is only recommended for memory/DB-constrained
# installations. It is not needed if your system can house
# twice as many worker_processes as you have configured.
#
# This allows a new master process to incrementally
# phase out the old master process with SIGTTOU to avoid a
# thundering herd (especially in the "preload_app false" case)
# when doing a transparent upgrade. The last worker spawned
# will then kill off the old master process with a SIGQUIT.
old_pid = "#{server.config[:pid]}.oldbin"
if old_pid != server.pid
begin
sig = (worker.nr + 1) >= server.worker_processes ? :QUIT : :TTOU
Process.kill(sig, File.read(old_pid).to_i)
rescue Errno::ENOENT, Errno::ESRCH
end
end
#
# Throttle the master from forking too quickly by sleeping. Due
# to the implementation of standard Unix signal handlers, this
# helps (but does not completely) prevent identical, repeated signals
# from being lost when the receiving process is busy.
# sleep 1
end
# What to do after we fork a worker
after_fork do |server, worker|
# per-process listener ports for debugging/admin/migrations
# addr = "127.0.0.1:#{9293 + worker.nr}"
# server.listen(addr, :tries => -1, :delay => 5, :tcp_nopush => true)
# XXX why does gitlab not enable this?
# # the following is *required* for Rails + "preload_app true",
# defined?(ActiveRecord::Base) and
# ActiveRecord::Base.establish_connection
# reset prometheus client, this will cause any opened metrics files to be closed
#defined?(::Prometheus::Client.reinitialize_on_pid_change) &&
# Prometheus::Client.reinitialize_on_pid_change
# if preload_app is true, then you may also want to check and
# restart any other shared sockets/descriptors such as Memcached,
# and Redis. TokyoCabinet file handles are safe to reuse
# between any number of forked children (assuming your kernel
# correctly implements pread()/pwrite() system calls)
end
# Where to drop a pidfile
pid '{{ directory.run }}/unicorn.pid'
# Where stderr gets logged
stderr_path '{{ unicorn.log }}/unicorn_stderr.log'
# Where stdout gets logged
stdout_path '{{ unicorn.log }}/unicorn_stdout.log'
{# we do not support Relative url
<%- if @relative_url %>
# Relative url from where GitLab is served
ENV['RAILS_RELATIVE_URL_ROOT'] = "<%= @relative_url %>"
<%- end %>
#}
# Min memory size (RSS) per worker
ENV['GITLAB_UNICORN_MEMORY_MIN'] = ({{ cfg('unicorn_worker_memory_limit_min') }}).to_s
# Max memory size (RSS) per worker
ENV['GITLAB_UNICORN_MEMORY_MAX'] = ({{ cfg('unicorn_worker_memory_limit_max') }}).to_s
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/test/ 0000775 0000000 0000000 00000000000 14241130220 0026000 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/test/README.md 0000664 0000000 0000000 00000000042 14241130220 0027253 0 ustar 00root root 0000000 0000000 Tests for Gitlab software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/test/setup.py 0000664 0000000 0000000 00000003657 14241130220 0027525 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.gitlab'
long_description = open("README.md").read()
setup(
    name=name,
    version=version,
    description="Test for SlapOS' Gitlab",
    long_description=long_description,
    long_description_content_type='text/markdown',
    maintainer="Nexedi",
    maintainer_email="info@nexedi.com",
    url="https://lab.nexedi.com/nexedi/slapos",
    packages=find_packages(),
    install_requires=[
        'slapos.core',
        'slapos.libnetworkcache',
        'erp5.util',
        'supervisor',
        'requests',
    ],
    zip_safe=True,
    test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/test/test.py 0000664 0000000 0000000 00000004120 14241130220 0027326 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import logging
from six.moves.urllib.parse import urlparse
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
    os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))


class TestGitlab(SlapOSInstanceTestCase):
  __partition_reference__ = 'G'  # solve path too long for postgresql and unicorn

  @classmethod
  def getInstanceSoftwareType(cls):
    return 'gitlab-test'

  def setUp(self):
    self.backend_url = self.computer_partition.getConnectionParameterDict(
    )['backend_url']

  def test_http_get(self):
    resp = requests.get(self.backend_url, verify=False)
    self.assertTrue(
        resp.status_code in [requests.codes.ok, requests.codes.found])
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/gitlab/watcher.in 0000664 0000000 0000000 00000003466 14241130220 0027017 0 ustar 00root root 0000000 0000000 #!{{ bash.location }}/bin/bash
# run program under watchdog
# watcher <restart-codes> <prog> [<args> ...]
#
# <restart-codes> = code1,code2,...
#
# if the program terminates with status in <restart-codes> - it is restarted after a grace period.
# if the program terminates otherwise - the whole process terminates.
#
# a code can be numeric or symbolic - referring to a signal name. example:
#
# watcher 0,SIGKILL ...
die() {
echo "$@" 1>&2
exit 1
}
if [ "$#" -lt 2 ]; then
die "Usage: watcher [ ...]"
fi
restart_codes="$1"; shift
prog="$@"
# signumber <signame> -> #sig
signumber() {
signame=$1
# "11) SIGSEGV "
sigentry=`kill -l |grep -o "[0-9]\+) $signame\(\s\|$\)"` ||
die "E: $signame is not a signal"
echo "$sigentry" | grep -o "[0-9]\+"
}
# restart codes as set
declare -A restarts
for code in `echo "$restart_codes" |sed 's/,/ /g'`; do
case $code in
*[!0-9]*)
# non-number - treat it as signal name
signo=`signumber $code` || exit 1
code=$((128 + $signo)) # exit code of process terminated by signal #signo
;;
*)
# already number
;;
esac
restarts[$code]=y
done
progpid=""
# make sure to terminate children, when we exit.
# needed for e.g. when `slapos node stop ...` kills us.
trap 'atexit' EXIT
atexit() {
jobs="$(jobs -p)"
test -n "$jobs" && kill $jobs
}
# run prog under monitoring
while true; do
echo "run $prog"
$prog &
progpid=$!
echo "wait $progpid"
wait $progpid
status=$?
echo "-> $status"
# if program terminated not with expected status - exit
if [ "${restarts[$status]}" != y ] ; then
echo "exit $status"
exit "$status"
fi
# otherwise sleep a bit and restart
sleep 1
done
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/ 0000775 0000000 0000000 00000000000 14241130220 0025156 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/README.md 0000664 0000000 0000000 00000003175 14241130220 0026443 0 ustar 00root root 0000000 0000000 # grafana / telegraf / influxdb
This is an experimental integration, mainly to evaluate these solutions.
## Custom telegraf plugins
See https://github.com/influxdata/telegraf to learn about plugins.
Useful plugins in this context are probably
[exec](https://github.com/influxdata/telegraf/tree/v1.17.3/plugins/inputs/exec),
[logparser](https://github.com/influxdata/telegraf/tree/v1.17.3/plugins/inputs/logparser)
or
[http](https://github.com/influxdata/telegraf/tree/v1.17.3/plugins/inputs/http).
Telegraf will save the metrics it collects in the `telegraf` database of the embedded influxdb server.
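
For example, a minimal `exec` input (the command path is a made-up placeholder)
could be dropped in the telegraf extra config directory:

```
[[inputs.exec]]
  commands = ["/path/to/some-metrics-script"]
  data_format = "influx"
  interval = "30s"
```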
## Grafana
A default user is created, username and password are published as connection
parameters. You can add more users in the grafana interface.
Datasources should be automatically added.
## Influxdb
Influxdb backups are not done automatically by this software release.
One important thing to note is that the backup protocol is enabled on the ipv4
address provided by slapos, so make sure this ip is not reachable from untrusted
sources.
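
For reference, with the default `rpc-port` 8088 used by this software release, a
manual backup could look like this (the ipv4 address and destination directory
are placeholders):

```
influxd backup -portable -host 10.0.0.2:8088 /srv/backup/influxdb
```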
# Ingesting/Visualizing logs
Even though its main feature is visualizing metrics, Grafana has a feature called "Explore" to view logs for a time frame.
The following backends can be used:
## Loki
See `TestLoki` in test for an example.
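
Assuming the `logcli` binary built by this software release and the published
`loki-url` connection parameter (a placeholder address is used here), ingested
logs can be queried with something like:

```
LOKI_ADDR=http://10.0.0.2:3100 logcli query '{job="grafanalogs"}'
```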
## Influxdb
Influxdb logs only have tags, and there does not seem to be a way to search them other than by tag and time frame.
To inject log files containing:
```
INFO the message
WARN another message
```
use config like:
```
[[inputs.logparser]]
  files = ["/tmp/x*.log", "/tmp/aaa.log"]
  [inputs.logparser.grok]
    measurement = "logs"
    patterns = ['^%{WORD:level:tag} %{GREEDYDATA:message:string}']
```
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/buildout.hash.cfg 0000664 0000000 0000000 00000002640 14241130220 0030412 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposedly using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance-profile]
filename = instance.cfg.in
md5sum = 6fb42f5663864789ff7b375a9a86888e
[influxdb-config-file]
filename = influxdb-config-file.cfg.in
md5sum = a28972ced3e0f4aa776e43a9c44717c0
[telegraf-config-file]
filename = telegraf-config-file.cfg.in
md5sum = a1a9c22c2a7829c66a49fc2504604d21
[grafana-config-file]
filename = grafana-config-file.cfg.in
md5sum = e255dcca466f5de51698d24cbd114577
[grafana-provisioning-config-file]
filename = grafana-provisioning-config-file.cfg.in
md5sum = 3aa0f1ed752b2a59ea2b5e7c1733daf3
[loki-config-file]
filename = loki-config-file.cfg.in
md5sum = ad2baf4599a937d7352034a41fa24814
[promtail-config-file]
filename = promtail-config-file.cfg.in
md5sum = c8c9d815dd7b427788c066f041f04573
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/grafana-config-file.cfg.in0000664 0000000 0000000 00000035101 14241130220 0032023 0 ustar 00root root 0000000 0000000 ##################### Grafana Configuration Defaults #####################
#
# Do not modify this file in grafana installs
#
# possible values : production, development
app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
instance_name = ${HOSTNAME}
#################################### Paths ###############################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#data = data
data = {{ grafana['data-dir'] }}
# Directory where grafana can store logs
#logs = data/log
logs = {{ grafana['logs-dir'] }}
# Directory where grafana will automatically scan and look for plugins
#plugins = data/plugins
plugins = {{ grafana['plugins-dir'] }}
# folder that contains provisioning config files that grafana will apply on startup and while running.
#provisioning = conf/provisioning
provisioning = {{ grafana['provisioning-config-dir'] }}
#################################### Server ##############################
[server]
# Protocol (http, https, socket)
protocol = https
# The ip address to bind to, empty will bind to all interfaces
#http_addr =
http_addr = [{{ grafana['ipv6'] }}]
# The http port to use
#http_port = 3000
http_port = {{ grafana['port'] }}
# The public facing domain name used to access grafana from a browser
domain = {{ apache_frontend['connection-domain'] }}
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
enforce_domain = false
# The full public facing url
root_url = {{ apache_frontend['connection-secure_access'] }}
# Log web requests
router_logging = false
# the path, relative to the working path
static_root_path = public
# enable gzip
#enable_gzip = false
enable_gzip = true
# https certs & key file
#cert_file =
cert_file = {{ grafana['ssl-cert-file'] }}
#cert_key =
cert_key = {{ grafana['ssl-key-file'] }}
# Unix socket path
#socket = /tmp/grafana.sock
#################################### Database ############################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
type = sqlite3
host = 127.0.0.1:3306
name = grafana
user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
url =
# Max idle conn setting default is 2
max_idle_conn = 2
# Max conn setting default is 0 (means not set)
max_open_conn =
# Set to true to log the sql calls and execution times.
log_queries =
# For "postgres", use either "disable", "require" or "verify-full"
# For "mysql", use either "true", "false", or "skip-verify".
ssl_mode = disable
ca_cert_path =
client_key_path =
client_cert_path =
server_cert_name =
# For "sqlite3" only, path relative to data_path setting
path = grafana.db
#################################### Session #############################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file"
provider = file
# Provider config options
# memory: does not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
# mysql: go-sql-driver/mysql dsn config string, examples:
# `user:password@tcp(127.0.0.1:3306)/database_name`
# `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name`
# memcache: 127.0.0.1:11211
provider_config = sessions
# Session cookie name
cookie_name = grafana_sess
# If you use session in https only, default is false
#cookie_secure = false
cookie_secure = true
# Session life time, default is 86400
session_life_time = 86400
gc_interval_time = 86400
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
logging = false
#################################### Analytics ###########################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
reporting_enabled = true
# Set to false to disable all checks to https://grafana.com
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to https://grafana.com to get latest versions
check_for_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
google_analytics_ua_id =
# Google Tag Manager ID, only enabled if you specify an id here
google_tag_manager_id =
#################################### Security ############################
[security]
# default admin user, created on startup
#admin_user = "admin"
admin_user = "{{ grafana['admin-user'] }}"
# default admin password, can be changed before first start of grafana, or in profile settings
#admin_password = admin
admin_password = "{{ grafana['admin-password'] }}"
# used for signing
#secret_key = SW2YcwTIb9zpOOhoPsMm
secret_key = "{{ grafana['secret-key'] }}"
# Auto-login remember days
login_remember_days = 7
cookie_username = grafana_user
cookie_remember_name = grafana_remember
# disable gravatar profile images
disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
data_source_proxy_whitelist =
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
external_enabled = true
external_snapshot_url = https://snapshots-origin.raintank.io
external_snapshot_name = Publish to snapshot.raintank.io
# remove expired snapshot
snapshot_remove_expired = true
# remove snapshots after 90 days
snapshot_TTL_days = 90
#################################### Dashboards ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
versions_to_keep = 20
#################################### Users ###############################
[users]
# disable user signup / registration
allow_sign_up = false
# Allow non admin users to create organizations
allow_org_create = false
# Set to true to automatically assign new users to the default organization (id 1)
auto_assign_org = true
# Default role new users will be automatically assigned (if auto_assign_org above is set to true)
auto_assign_org_role = Viewer
# Require email validation before sign up completes
verify_email_enabled = false
# Background text for the user field on the login page
login_hint = email or username
# Default UI theme ("dark" or "light")
default_theme = dark
# External user management
external_manage_link_url =
external_manage_link_name =
external_manage_info =
# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
viewers_can_edit = false
[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth
disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy
disable_signout_menu = false
#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access
enabled = false
# specify organization name that should be used for unauthenticated users
org_name = Main Org.
# specify role for unauthenticated users
org_role = Viewer
#################################### Github Auth #########################
[auth.github]
enabled = false
allow_sign_up = true
client_id = some_id
client_secret = some_secret
scopes = user:email
auth_url = https://github.com/login/oauth/authorize
token_url = https://github.com/login/oauth/access_token
api_url = https://api.github.com/user
team_ids =
allowed_organizations =
#################################### Google Auth #########################
[auth.google]
enabled = false
allow_sign_up = true
client_id = some_client_id
client_secret = some_client_secret
scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
auth_url = https://accounts.google.com/o/oauth2/auth
token_url = https://accounts.google.com/o/oauth2/token
api_url = https://www.googleapis.com/oauth2/v1/userinfo
allowed_domains =
hosted_domain =
#################################### Grafana.com Auth ####################
# legacy key names (so they work in env variables)
[auth.grafananet]
enabled = false
allow_sign_up = true
client_id = some_id
client_secret = some_secret
scopes = user:email
allowed_organizations =
[auth.grafana_com]
enabled = false
allow_sign_up = true
client_id = some_id
client_secret = some_secret
scopes = user:email
allowed_organizations =
#################################### Generic OAuth #######################
[auth.generic_oauth]
name = OAuth
enabled = false
allow_sign_up = true
client_id = some_id
client_secret = some_secret
scopes = user:email
auth_url =
token_url =
api_url =
team_ids =
allowed_organizations =
#################################### Basic Auth ##########################
[auth.basic]
enabled = true
#################################### Auth Proxy ##########################
[auth.proxy]
enabled = false
header_name = X-WEBAUTH-USER
header_property = username
auto_sign_up = true
ldap_sync_ttl = 60
whitelist =
#################################### Auth LDAP ###########################
[auth.ldap]
enabled = false
config_file = /etc/grafana/ldap.toml
allow_sign_up = true
#################################### SMTP / Emailing #####################
[smtp]
#enabled = false
enabled = {{ slapparameter_dict.get('smtp-server') and 'true' or 'false' }}
#host = localhost:25
host = {{ slapparameter_dict.get('smtp-server', '') }}
#user =
user = {{ slapparameter_dict.get('smtp-username', '') }}
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
#password =
password = {{ slapparameter_dict.get('smtp-password', '') and '"""%s"""' % slapparameter_dict['smtp-password'] or ""}}
cert_file =
key_file =
#skip_verify = false
skip_verify = {{ slapparameter_dict.get('smtp-verify-ssl', 'true').lower() == 'true' and 'false' or 'true' }}
#from_address = admin@grafana.localhost
from_address = {{ slapparameter_dict.get('email-from-address', '') }}
#from_name = Grafana
from_name = {{ slapparameter_dict.get('email-from-name', 'Grafana') }}
ehlo_identity =
[emails]
welcome_email_on_sign_up = false
templates_pattern = emails/*.html
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
filters =
# For "console" mode only
[log.console]
level =
# log line format, valid options are text, console and json
format = console
# For "file" mode only
[log.file]
level =
# log line format, valid options are text, console and json
format = text
# This enables automated log rotate(switch of following options), default is true
log_rotate = true
# Max line number of single file, default is 1000000
max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
max_size_shift = 28
# Segment log daily, default is true
daily_rotate = true
# Expired days of log file(delete after max days), default is 7
max_days = 7
[log.syslog]
level =
# log line format, valid options are text, console and json
format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
network =
address =
# Syslog facility. user, daemon and local0 through local7 are valid.
facility =
# Syslog tag. By default, the process' argv[0] is used.
tag =
#################################### Usage Quotas ########################
[quota]
enabled = false
#### set quotas to -1 to make unlimited. ####
# limit number of users per Org.
org_user = 10
# limit number of dashboards per Org.
org_dashboard = 100
# limit number of data_sources per Org.
org_data_source = 10
# limit number of api_keys per Org.
org_api_key = 10
# limit number of orgs a user can create.
user_org = 10
# Global limit of users.
global_user = -1
# global limit of orgs.
global_org = -1
# global limit of dashboards
global_dashboard = -1
# global limit of api_keys
global_api_key = -1
# global limit on number of logged in users.
global_session = -1
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
execute_alerts = true
#################################### Internal Grafana Metrics ############
# Metrics available at HTTP API Url /metrics
[metrics]
enabled = true
interval_seconds = 10
# Send internal Grafana metrics to graphite
[metrics.graphite]
# Enable by setting the address setting (ex localhost:2003)
address =
prefix = prod.grafana.%(instance_name)s.
[grafana_net]
url = https://grafana.com
[grafana_com]
url = https://grafana.com
#################################### Distributed tracing ############
[tracing.jaeger]
# jaeger destination (ex localhost:6831)
address =
# tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
always_included_tag =
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
sampler_param = 1
#################################### External Image Storage ##############
[external_image_storage]
# You can choose between (s3, webdav, gcs, azure_blob)
provider =
[external_image_storage.s3]
bucket_url =
bucket =
region =
path =
access_key =
secret_key =
[external_image_storage.webdav]
url =
username =
password =
public_url =
[external_image_storage.gcs]
key_file =
bucket =
path =
[external_image_storage.azure_blob]
account_name =
account_key =
container_name =
grafana-provisioning-config-file.cfg.in 0000664 0000000 0000000 00000000767 14241130220 0034502 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana # https://grafana.com/docs/administration/provisioning/#example-datasource-config-file
apiVersion: 1
datasources:
  - name: telegraf
    type: influxdb
    access: proxy
    url: {{ influxdb['url'] }}
    user: {{ influxdb['auth-username'] }}
    database: telegraf
    isDefault: true
    jsonData:
      tlsSkipVerify: true
    secureJsonData:
      password: {{ influxdb['auth-password'] }}
    version: 1
    editable: false
  - name: loki
    type: loki
    access: proxy
    url: {{ loki['url'] }}
    version: 1
    editable: false
influxdb-config-file.cfg.in 0000664 0000000 0000000 00000005725 14241130220 0032171 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana reporting-disabled = false
bind-address = "[{{ influxdb['local-host'] }}]:{{ influxdb['rpc-port'] }}"
[meta]
dir = "{{ influxdb['data-dir'] }}/meta"
retention-autocreate = true
logging-enabled = true
[data]
dir = "{{ influxdb['data-dir'] }}/data"
index-version = "inmem"
wal-dir = "{{ influxdb['data-dir'] }}/wal"
wal-fsync-delay = "0s"
query-log-enabled = true
cache-max-memory-size = 1073741824
cache-snapshot-memory-size = 26214400
cache-snapshot-write-cold-duration = "10m0s"
compact-full-write-cold-duration = "4h0m0s"
max-series-per-database = 1000000
max-values-per-tag = 100000
max-concurrent-compactions = 0
trace-logging-enabled = false
[coordinator]
write-timeout = "10s"
max-concurrent-queries = 0
query-timeout = "0s"
log-queries-after = "0s"
max-select-point = 0
max-select-series = 0
max-select-buckets = 0
[retention]
enabled = true
check-interval = "30m0s"
[shard-precreation]
enabled = true
check-interval = "10m0s"
advance-period = "30m0s"
[monitor]
store-enabled = true
store-database = "_internal"
store-interval = "10s"
[subscriber]
enabled = true
http-timeout = "30s"
insecure-skip-verify = false
ca-certs = ""
write-concurrency = 40
write-buffer-size = 1000
[http]
enabled = true
bind-address = "[{{ influxdb['host'] }}]:{{ influxdb['http-port'] }}"
auth-enabled = true
log-enabled = true
write-tracing = false
pprof-enabled = true
https-enabled = true
https-certificate = "{{ influxdb['ssl-cert-file'] }}"
https-private-key = "{{ influxdb['ssl-key-file'] }}"
max-row-limit = 0
max-connection-limit = 0
shared-secret = ""
realm = "InfluxDB"
unix-socket-enabled = true
bind-socket = "{{ influxdb['unix-socket'] }}"
max-body-size = 25000000
[ifql]
enabled = false
log-enabled = true
bind-address = ":8082"
[[graphite]]
enabled = false
bind-address = ":2003"
database = "graphite"
retention-policy = ""
protocol = "tcp"
batch-size = 5000
batch-pending = 10
batch-timeout = "1s"
consistency-level = "one"
separator = "."
udp-read-buffer = 0
[[collectd]]
enabled = false
bind-address = ":25826"
database = "collectd"
retention-policy = ""
batch-size = 5000
batch-pending = 10
batch-timeout = "10s"
read-buffer = 0
typesdb = "/usr/share/collectd/types.db"
security-level = "none"
auth-file = "/etc/collectd/auth_file"
parse-multivalue-plugin = "split"
[[opentsdb]]
enabled = false
bind-address = ":4242"
database = "opentsdb"
retention-policy = ""
consistency-level = "one"
tls-enabled = false
certificate = "/etc/ssl/influxdb.pem"
batch-size = 1000
batch-pending = 5
batch-timeout = "1s"
log-point-errors = true
[[udp]]
enabled = false
bind-address = ":8089"
database = "udp"
retention-policy = ""
batch-size = 5000
batch-pending = 10
read-buffer = 0
batch-timeout = "1s"
precision = ""
[continuous_queries]
log-enabled = true
enabled = true
query-stats-enabled = false
run-interval = "1s"
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/instance-input-schema.json0000664 0000000 0000000 00000002416 14241130220 0032253 0 ustar 00root root 0000000 0000000 {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters to instantiate Grafana",
"type": "object",
"additionalProperties": false,
"properties": {
"smtp-server": {
"description": "SMTP server used by grafana to send emails (in host:port format). Leaving this empty will disable email sending.",
"type": "string"
},
"smtp-username": {
"description": "Username to connect to SMTP server",
"type": "string"
},
"smtp-password": {
"description": "Password to connect to SMTP server",
"type": "string"
},
"smtp-verify-ssl": {
"description": "Verify SSL certificate of SMTP server",
"type": "string",
"enum": [
"true",
"false"
]
},
"email-from-address": {
"description": "Email address used in From: header of emails",
"type": "string"
},
"email-from-name": {
"description": "Name used in From: header of emails",
"default": "Grafana",
"type": "string"
},
"promtail-extra-scrape-config": {
"description": "Raw promtail config (experimental parameter, see https://github.com/grafana/loki/blob/v0.3.0/docs/promtail.md#scrape-configs for detail)",
"default": "",
"type": "string"
}
}
}
instance-output-schema.json 0000664 0000000 0000000 00000002507 14241130220 0032376 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by Grafana instantiation",
"additionalProperties": false,
"properties": {
"url": {
"description": "Shared frontend for this Grafana instance",
"pattern": "^https://",
"type": "string"
},
"grafana-username": {
"description": "Admin user for grafana",
"type": "string"
},
"grafana-password": {
"description": "Password for grafana's admin user",
"type": "string"
},
"grafana-url": {
"description": "IPv6 URL to access grafana",
"pattern": "^https://",
"type": "string"
},
"influxdb-url": {
"description": "IPv6 URL of influxdb HTTP endpoint",
"pattern": "^https://",
"type": "string"
},
"influxdb-database": {
"description": "database created in influxdb",
"type": "string"
},
"influxdb-username": {
"description": "username for influxdb",
"type": "string"
},
"influxdb-password": {
"description": "password for influxdb user",
"type": "string"
},
"telegraf-extra-config-dir": {
"description": "Directory in telegraf partition where extra configuration file will be loaded. These files must match *.conf pattern",
"type": "string"
}
},
"type": "object"
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/instance.cfg.in 0000664 0000000 0000000 00000021471 14241130220 0030055 0 ustar 00root root 0000000 0000000 [buildout]
parts =
  promises
  publish-connection-parameter
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
[instance-parameter]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[slap-configuration]
# apache-frontend reads from a part named [slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
srv = ${:home}/srv
service = ${:etc}/service
promise = ${:etc}/promise
influxdb-data-dir = ${:srv}/influxdb
grafana-dir = ${:srv}/grafana
grafana-data-dir = ${:grafana-dir}/data
grafana-logs-dir = ${:var}/log
grafana-plugins-dir = ${:grafana-dir}/plugins
grafana-provisioning-config-dir = ${:grafana-dir}/provisioning-config
grafana-provisioning-datasources-dir = ${:grafana-provisioning-config-dir}/datasources
grafana-provisioning-dashboards-dir = ${:grafana-provisioning-config-dir}/dashboards
telegraf-dir = ${:srv}/telegraf
telegraf-extra-config-dir = ${:telegraf-dir}/extra-config
loki-dir = ${:srv}/loki
loki-storage-boltdb-dir = ${:loki-dir}/index/
loki-storage-filesystem-dir = ${:loki-dir}/chunks/
promtail-dir = ${:srv}/promtail
# macros
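# [generate-certificate]: extend with <= to create a self-signed
# certificate/key pair named after the extending section (valid ~10 years)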
[generate-certificate]
recipe = plone.recipe.command
command =
  if [ ! -e ${:key-file} ]
  then
    {{ openssl_bin }} req -x509 -nodes -days 3650 \
      -subj "/C=AA/ST=X/L=X/O=Dis/CN=${:common-name}" \
      -newkey rsa:1024 -keyout ${:key-file} \
      -out ${:cert-file}
  fi
update-command = ${:command}
key-file = ${directory:etc}/${:_buildout_section_name_}.key
cert-file = ${directory:etc}/${:_buildout_section_name_}.crt
common-name = ${:_buildout_section_name_}
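# [config-file]: extend with <= to render parts/<section-name>/<section-name>.cfg.in
# with jinja2 into etc/<section-name>.cfg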
[config-file]
recipe = slapos.recipe.template:jinja2
url = {{ buildout['parts-directory'] }}/${:_buildout_section_name_}/${:_buildout_section_name_}.cfg.in
output = ${directory:etc}/${:_buildout_section_name_}.cfg
extensions = jinja2.ext.do
[check-port-listening-promise]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/${:_buildout_section_name_}
[check-url-available-promise]
recipe = slapos.cookbook:check_url_available
path = ${directory:promise}/${:_buildout_section_name_}
dash_path = {{ dash_bin }}
curl_path = {{ curl_bin }}
[influxdb]
ipv6 = ${instance-parameter:ipv6-random}
ipv4 = ${instance-parameter:ipv4-random}
host = ${:ipv6}
local-host = ${:ipv4}
rpc-port = 8088
http-port = 8086
url = https://[${:host}]:${:http-port}
data-dir = ${directory:influxdb-data-dir}
auth-username = ${influxdb-password:username}
auth-password = ${influxdb-password:passwd}
unix-socket = ${directory:var}/influxdb.socket
ssl-cert-file = ${influxdb-certificate:cert-file}
ssl-key-file = ${influxdb-certificate:key-file}
database = telegraf
recipe = slapos.cookbook:wrapper
command-line =
  nice -19 chrt --idle 0 ionice -c3 {{ influxd_bin }} -config ${influxdb-config-file:output}
wrapper-path = ${directory:service}/influxdb
[influxdb-config-file]
<= config-file
context =
  section influxdb influxdb
[influxdb-password]
recipe = slapos.cookbook:generate.password
username = influxdb
[influxdb-certificate]
<= generate-certificate
[influxdb-listen-promise]
<= check-port-listening-promise
hostname = ${influxdb:ipv6}
port = ${influxdb:http-port}
[influxdb-password-promise]
recipe = slapos.cookbook:wrapper
command-line =
  {{ influx_bin }} -username ${influxdb:auth-username} -password ${influxdb:auth-password} -socket ${influxdb:unix-socket} -execute "CREATE USER ${influxdb:auth-username} WITH PASSWORD '${influxdb:auth-password}' WITH ALL PRIVILEGES"
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[influxdb-database-ready-promise]
recipe = slapos.cookbook:wrapper
command-line =
  bash -c "{{ influx_bin }} \
    -username ${influxdb:auth-username} \
    -password ${influxdb:auth-password} \
    -host [${influxdb:host}] \
    -port ${influxdb:http-port} \
    -unsafeSsl \
    -ssl \
    -execute 'show databases' | grep '${influxdb:database}'"
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[grafana]
ipv6 = ${instance-parameter:ipv6-random}
port = 8180
url = https://[${:ipv6}]:${:port}
data-dir = ${directory:grafana-data-dir}
logs-dir = ${directory:grafana-logs-dir}
plugins-dir = ${directory:grafana-plugins-dir}
provisioning-config-dir = ${directory:grafana-provisioning-config-dir}
provisioning-datasources-dir = ${directory:grafana-provisioning-datasources-dir}
admin-user = ${grafana-password:username}
admin-password = ${grafana-password:passwd}
secret-key = ${grafana-secret-key:passwd}
ssl-key-file = ${grafana-certificate:key-file}
ssl-cert-file = ${grafana-certificate:cert-file}
recipe = slapos.cookbook:wrapper
command-line =
  {{ grafana_bin }} -config ${grafana-config-file:output} -homepath {{ grafana_homepath }}
wrapper-path = ${directory:service}/grafana
[grafana-certificate]
<= generate-certificate
[grafana-password]
recipe = slapos.cookbook:generate.password
username = admin
[grafana-secret-key]
recipe = slapos.cookbook:generate.password
[grafana-config-file]
<= config-file
context =
  section grafana grafana
  section apache_frontend apache-frontend
  key slapparameter_dict slap-configuration:configuration
depends =
  ${grafana-provisioning-config-file:output}
[grafana-provisioning-config-file]
<= config-file
output = ${grafana:provisioning-datasources-dir}/datasource.yaml
context =
  section influxdb influxdb
  section loki loki
[grafana-listen-promise]
<= check-port-listening-promise
hostname= ${grafana:ipv6}
port = ${grafana:port}
[telegraf]
recipe = slapos.cookbook:wrapper
extra-config-dir = ${directory:telegraf-extra-config-dir}
# telegraf needs influxdb to be already listening before starting
command-line =
  bash -c '${influxdb-listen-promise:path} && nice -19 chrt --idle 0 ionice -c3 {{ telegraf_bin }} --config ${telegraf-config-file:output} --config-directory ${:extra-config-dir}'
wrapper-path = ${directory:service}/telegraf
[telegraf-config-file]
<= config-file
context =
  section influxdb influxdb
  section telegraf telegraf
[loki]
recipe = slapos.cookbook:wrapper
command-line =
  bash -c 'nice -19 chrt --idle 0 ionice -c3 {{ loki_bin }} -config.file=${loki-config-file:output}'
wrapper-path = ${directory:service}/loki
storage-boltdb-dir = ${directory:loki-storage-boltdb-dir}
storage-filesystem-dir = ${directory:loki-storage-filesystem-dir}
ip = ${instance-parameter:ipv4-random}
port = 3100
grpc-port = 9095
url = http://${:ip}:${:port}
[loki-config-file]
<= config-file
context =
  section loki loki
[loki-listen-promise]
<= check-url-available-promise
url = ${loki:url}/ready
[promtail]
recipe = slapos.cookbook:wrapper
command-line =
  bash -c 'nice -19 chrt --idle 0 ionice -c3 {{ promtail_bin }} -config.file=${promtail-config-file:output}'
wrapper-path = ${directory:service}/promtail
dir = ${directory:promtail-dir}
http-port = 19080
grpc-port = 19095
ip = ${instance-parameter:ipv4-random}
url = http://${:ip}:${:http-port}
[promtail-config-file]
<= config-file
context =
  section promtail promtail
  section loki loki
  key slapparameter_dict slap-configuration:configuration
[promtail-listen-promise]
<= check-port-listening-promise
hostname= ${promtail:ip}
port = ${promtail:http-port}
[apache-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Grafana Frontend
# XXX We have hardcoded SR URL here.
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
slave = true
config-url = ${grafana:url}
config-https-only = true
return = domain secure_access
[apache-frontend-available-promise]
<= check-url-available-promise
url = ${apache-frontend:connection-secure_access}
[promises]
recipe =
instance-promises =
  ${influxdb-listen-promise:path}
  ${influxdb-password-promise:wrapper-path}
  ${influxdb-database-ready-promise:wrapper-path}
  ${grafana-listen-promise:path}
  ${loki-listen-promise:path}
  ${promtail-listen-promise:path}
  ${apache-frontend-available-promise:path}
[publish-connection-parameter]
recipe = slapos.cookbook:publish
influxdb-url = ${influxdb:url}
influxdb-database = ${influxdb:database}
influxdb-username = ${influxdb:auth-username}
influxdb-password = ${influxdb:auth-password}
telegraf-extra-config-dir = ${telegraf:extra-config-dir}
grafana-url = ${grafana:url}
grafana-username = ${grafana:admin-user}
grafana-password = ${grafana:admin-password}
loki-url = ${loki:url}
promtail-url = ${promtail:url}
url = ${apache-frontend:connection-secure_access}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/loki-config-file.cfg.in 0000664 0000000 0000000 00000002271 14241130220 0031364 0 ustar 00root root 0000000 0000000 auth_enabled: false
server:
  http_listen_address: {{ loki['ip'] }}
  http_listen_port: {{ loki['port'] }}
  grpc_listen_address: {{ loki['ip'] }}
  grpc_listen_port: {{ loki['grpc-port'] }}

ingester:
  lifecycler:
    address: {{ loki['ip'] }}
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1
  chunk_idle_period: 15m

schema_config:
  configs:
    - from: 2018-04-15
      store: boltdb
      object_store: filesystem
      schema: v9
      index:
        prefix: index_
        period: 168h

storage_config:
  boltdb:
    directory: {{ loki['storage-boltdb-dir'] }}
  filesystem:
    directory: {{ loki['storage-filesystem-dir'] }}

limits_config:
  enforce_metric_name: false
  reject_old_samples: true
  reject_old_samples_max_age: 168h

chunk_store_config:
  max_look_back_period: 0

table_manager:
  chunk_tables_provisioning:
    inactive_read_throughput: 0
    inactive_write_throughput: 0
    provisioned_read_throughput: 0
    provisioned_write_throughput: 0
  index_tables_provisioning:
    inactive_read_throughput: 0
    inactive_write_throughput: 0
    provisioned_read_throughput: 0
    provisioned_write_throughput: 0
  retention_deletes_enabled: false
  retention_period: 0
promtail-config-file.cfg.in 0000664 0000000 0000000 00000001056 14241130220 0032176 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana
server:
  http_listen_address: {{ promtail['ip'] }}
  http_listen_port: {{ promtail['http-port'] }}
  grpc_listen_address: {{ promtail['ip'] }}
  grpc_listen_port: {{ promtail['grpc-port'] }}
  external_url: {{ promtail['url'] }}

positions:
  filename: {{ promtail['dir'] }}/positions.yaml

clients:
  - url: {{ loki['url'] }}/api/prom/push

scrape_configs:
  - job_name: test
    static_configs:
      - targets:
          - localhost
        labels:
          job: grafanalogs
          __path__: ./var/log/*log

{{ slapparameter_dict.get('promtail-extra-scrape-config', '') }}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/software.cfg 0000664 0000000 0000000 00000007030 14241130220 0027471 0 ustar 00root root 0000000 0000000 [buildout]
extends =
  ../../stack/slapos.cfg
  ../../stack/nodejs.cfg
  ../../component/make/buildout.cfg
  ../../component/golang/buildout.cfg
  ../../component/openssl/buildout.cfg
  ../../component/curl/buildout.cfg
  ../../component/dash/buildout.cfg
  buildout.hash.cfg
versions = versions
parts =
  slapos-cookbook
  instance-profile
  gowork
  influxdb-config-file
  telegraf-config-file
  grafana-config-file
  grafana-provisioning-config-file
  loki-config-file
  promtail-config-file
[python]
part = python3
[nodejs]
<= nodejs-14.16.0
[go_github.com_grafana_grafana]
<= go-git-package
go.importpath = github.com/grafana/grafana
repository = https://github.com/grafana/grafana
revision = v7.5.2-0-gca413c612f
[go_github.com_grafana_loki]
<= go-git-package
go.importpath = github.com/grafana/loki
repository = https://github.com/perrinjerome/loki
revision = v2.2.1-1-gda6d45f2
[go_github.com_influxdata_influxdb]
<= go-git-package
go.importpath = github.com/influxdata/influxdb
repository = https://github.com/influxdata/influxdb
revision = v1.8.4-0-gbc8ec4384e
[go_github.com_influxdata_telegraf]
<= go-git-package
go.importpath = github.com/influxdata/telegraf
repository = https://github.com/influxdata/telegraf
revision = v1.17.3-0-g24a552b90b
[gowork]
install =
  ${go_github.com_grafana_loki:location}:./cmd/loki
  ${go_github.com_grafana_loki:location}:./cmd/promtail
  ${go_github.com_grafana_loki:location}:./cmd/logcli
  ${go_github.com_influxdata_telegraf:location}:./cmd/...
  ${go_github.com_influxdata_influxdb:location}:./cmd/...
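# note: the trailing "..." patterns make gowork build every command found
# under cmd/ of the telegraf and influxdb packages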
# disable cgo, to prevent loki/promtail from using go-systemd
environment =
  CGO_ENABLED = 0
telegraf-bin = ${:bin}/telegraf
influx-bin = ${:bin}/influx
influxd-bin = ${:bin}/influxd
grafana-bin = ${:bin}/grafana-server
grafana-homepath = ${grafana:homepath}
loki-bin = ${:bin}/loki
promtail-bin = ${:bin}/promtail
[grafana]
recipe = plone.recipe.command
command = bash -c "
cd ${:homepath} &&
. ${gowork:env.sh} &&
# Unlike the loki, grafana _needs_ CGO_ENABLED, so we override here
export CGO_ENABLED=1 &&
go run build.go setup && \
go run build.go build && \
${yarn:location}/bin/yarn install --pure-lockfile && \
${yarn:location}/bin/yarn run build && \
${yarn:location}/bin/yarn run plugins:build-bundled && \
# Cleanup yarn and Cypress caches
rm -rf ${buildout:directory}/.cache/Cypress/ && \
rm -rf ${buildout:directory}/.cache/yarn/
"
homepath = ${go_github.com_grafana_grafana:location}
stop-on-error = true
[download-file-base]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:filename}
[influxdb-config-file]
<= download-file-base
[telegraf-config-file]
<= download-file-base
[grafana-config-file]
<= download-file-base
[grafana-provisioning-config-file]
<= download-file-base
[loki-config-file]
<= download-file-base
[promtail-config-file]
<= download-file-base
[instance-profile]
recipe = slapos.recipe.template:jinja2
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/instance.cfg
extensions = jinja2.ext.do
context =
  section buildout buildout
  key openssl_bin openssl-output:openssl
  key telegraf_bin gowork:telegraf-bin
  key influxd_bin gowork:influxd-bin
  key influx_bin gowork:influx-bin
  key grafana_bin gowork:grafana-bin
  key grafana_homepath gowork:grafana-homepath
  key loki_bin gowork:loki-bin
  key promtail_bin gowork:promtail-bin
  key curl_bin :curl-bin
  key dash_bin :dash-bin
curl-bin = ${curl:location}/bin/curl
dash-bin = ${dash:location}/bin/dash
[versions]
inotifyx = 0.2.2
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/software.cfg.json 0000664 0000000 0000000 00000000552 14241130220 0030443 0 ustar 00root root 0000000 0000000 {
"name": "Grafana",
"description": "Grafana, Telegraf and Influxdb",
"serialisation": "xml",
"software-type": {
"default": {
"title": "Default",
"description": "Grafana, Telegraf and Influxdb in same partition",
"request": "instance-input-schema.json",
"response": "instance-output-schema.json",
"index": 0
}
}
}
telegraf-config-file.cfg.in 0000664 0000000 0000000 00000011456 14241130220 0032145 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana # Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
# Jitter the flush interval by a random amount. This is primarily to avoid
# large write spikes for users running a large number of telegraf instances.
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
# Run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
[outputs]
# Configuration for influxdb server to send metrics to
[outputs.influxdb]
# The full HTTP or UDP endpoint URL for your InfluxDB instance
# Multiple urls can be specified for InfluxDB cluster support.
# urls = ["udp://localhost:8089"] # UDP endpoint example
# XXX XXX XXX
#urls = ["http://localhost:8086"] # required
urls = ["{{ influxdb['url'] }}"]
insecure_skip_verify = true # because we are using a self signed certificate
# The target database for metrics (telegraf will create it if not exists)
database = "{{ influxdb['database'] }}" # required
# Precision of writes, valid values are n, u, ms, s, m, and h
# note: using second precision greatly helps InfluxDB compression
precision = "s"
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
username = "{{ influxdb['auth-username'] }}"
password = "{{ influxdb['auth-password'] }}"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# PLUGINS #
###############################################################################
# Read metrics about cpu usage
[cpu]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time"]
# Read metrics about memory usage
[mem]
# no configuration
[disk]
[io]
[system]
###############################################################################
# ERP5 - PLUGINS #
###############################################################################
#
# Left here as example, don't edit this file directly, but place your config
# files in {{ telegraf['extra-config-dir'] }}
#
#[mysql]
# servers = ["root@unix(/srv/slapgrid/slappart12/srv/runner/instance/slappart1/var/run/mariadb.sock)/erp5"]
#[memcached]
# # XXX kumofs does not support memcached's stat command
# servers = ["10.0.248.233:2013", "10.0.248.233:2003"]
#[haproxy]
# servers = ["http://10.0.121.162:2150/haproxy", "http://10.0.121.162:2152/haproxy"]
#[[inputs.exec]]
# commands = ["/srv/slapgrid/slappart0/bin/slapsensor /srv/slapgrid/slappart0/srv/runner/instance/etc/supervisord.conf"]
# name_suffix = "_slapos"
# interval = "5s"
###############################################################################
# SERVICE PLUGINS #
###############################################################################
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/test/ 0000775 0000000 0000000 00000000000 14241130220 0026135 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/test/README.md 0000664 0000000 0000000 00000000043 14241130220 0027411 0 ustar 00root root 0000000 0000000 Tests for Grafana software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/test/setup.py 0000664 0000000 0000000 00000003722 14241130220 0027653 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.grafana'
long_description = open("README.md").read()
setup(
    name=name,
    version=version,
    description="Test for SlapOS' Grafana",
    long_description=long_description,
    long_description_content_type='text/markdown',
    maintainer="Nexedi",
    maintainer_email="info@nexedi.com",
    url="https://lab.nexedi.com/nexedi/slapos",
    packages=find_packages(),
    install_requires=[
        'slapos.core',
        'slapos.libnetworkcache',
        'erp5.util',
        'requests',
        'six',
        'supervisor',
        'psutil',
    ],
    zip_safe=True,
    test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/grafana/test/test.py 0000664 0000000 0000000 00000031237 14241130220 0027474 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import unicode_literals
import io
import logging
import os
import tempfile
import textwrap
import time
import psutil
import requests
from six.moves import configparser
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class GrafanaTestCase(SlapOSInstanceTestCase):
"""Base test case for grafana.
  Since the instances take time to start and stop,
we increase the number of retries.
"""
instance_max_retry = 50
report_max_retry = 30
class TestGrafana(GrafanaTestCase):
def setUp(self):
self.grafana_url = self.computer_partition.getConnectionParameterDict(
)['grafana-url']
def test_grafana_available(self):
resp = requests.get(self.grafana_url, verify=False)
self.assertEqual(requests.codes.ok, resp.status_code)
def test_grafana_api(self):
# check API is usable
api_org_url = '{self.grafana_url}/api/org'.format(**locals())
resp = requests.get(api_org_url, verify=False)
self.assertEqual(requests.codes.unauthorized, resp.status_code)
connection_params = self.computer_partition.getConnectionParameterDict()
resp = requests.get(
api_org_url,
verify=False,
auth=requests.auth.HTTPBasicAuth(
connection_params['grafana-username'],
connection_params['grafana-password'],
))
self.assertEqual(requests.codes.ok, resp.status_code)
self.assertEqual(1, resp.json()['id'])
  def test_grafana_datasource_provisioned(self):
    # data sources are provisioned
connection_params = self.computer_partition.getConnectionParameterDict()
resp = requests.get(
'{self.grafana_url}/api/datasources'.format(**locals()),
verify=False,
auth=requests.auth.HTTPBasicAuth(
connection_params['grafana-username'],
connection_params['grafana-password'],
))
self.assertEqual(requests.codes.ok, resp.status_code)
self.assertEqual(
sorted(['influxdb', 'loki']),
sorted([ds['type'] for ds in resp.json()]))
def test_email_disabled(self):
config = configparser.ConfigParser()
# grafana config file is like an ini file with an implicit default section
with open(
os.path.join(self.computer_partition_root_path, 'etc',
'grafana-config-file.cfg')) as f:
config.readfp(io.StringIO('[default]\n' + f.read()))
self.assertEqual(config.get('smtp', 'enabled'), 'false')
class TestGrafanaEmailEnabled(GrafanaTestCase):
__partition_reference__ = 'mail'
smtp_verify_ssl = "true"
smtp_skip_verify = "false"
@classmethod
def getInstanceParameterDict(cls):
return {
"smtp-server": "smtp.example.com:25",
"smtp-username": "smtp_username",
"smtp-password": "smtp_password",
'smtp-verify-ssl': cls.smtp_verify_ssl,
"email-from-address": "grafana@example.com",
"email-from-name": "Grafana From Name",
}
def test_email_enabled(self):
config = configparser.ConfigParser()
with open(
os.path.join(self.computer_partition_root_path, 'etc',
'grafana-config-file.cfg')) as f:
config.readfp(io.StringIO('[default]\n' + f.read()))
self.assertEqual(config.get('smtp', 'enabled'), 'true')
self.assertEqual(config.get('smtp', 'host'), 'smtp.example.com:25')
self.assertEqual(config.get('smtp', 'user'), 'smtp_username')
self.assertEqual(config.get('smtp', 'password'), '"""smtp_password"""')
self.assertEqual(config.get('smtp', 'skip_verify'), self.smtp_skip_verify)
self.assertEqual(config.get('smtp', 'from_address'), 'grafana@example.com')
self.assertEqual(config.get('smtp', 'from_name'), 'Grafana From Name')
class TestGrafanaEmailEnabledSkipVerify(TestGrafanaEmailEnabled):
smtp_verify_ssl = "false"
smtp_skip_verify = "true"
class TestInfluxDb(GrafanaTestCase):
def setUp(self):
self.influxdb_url = self.computer_partition.getConnectionParameterDict(
)['influxdb-url']
def test_influxdb_available(self):
ping_url = '{self.influxdb_url}/ping'.format(**locals())
resp = requests.get(ping_url, verify=False)
self.assertEqual(requests.codes.no_content, resp.status_code)
def test_influxdb_api(self):
query_url = '{self.influxdb_url}/query'.format(**locals())
connection_params = self.computer_partition.getConnectionParameterDict()
for i in range(10):
      # retry, as the databases may take a short while to be created
resp = requests.get(
query_url,
verify=False,
params=dict(
q='SHOW DATABASES',
u=connection_params['influxdb-username'],
p=connection_params['influxdb-password']))
self.assertEqual(requests.codes.ok, resp.status_code)
result, = resp.json()['results']
if result['series'] and 'values' in result['series'][0]:
break
time.sleep(0.5 * i)
self.assertIn(
[connection_params['influxdb-database']], result['series'][0]['values'])
class TestTelegraf(GrafanaTestCase):
def test_telegraf_running(self):
with self.slap.instance_supervisor_rpc as supervisor:
all_process_info = supervisor.getAllProcessInfo()
process_info, = [p for p in all_process_info if 'telegraf' in p['name']]
self.assertEqual('RUNNING', process_info['statename'])
class TestLoki(GrafanaTestCase):
@classmethod
def getInstanceParameterDict(cls):
cls._logfile = tempfile.NamedTemporaryFile(suffix='log')
return {
'promtail-extra-scrape-config':
textwrap.dedent(
r'''
- job_name: {cls.__name__}
pipeline_stages:
- match:
selector: '{{job="{cls.__name__}"}}'
stages:
- multiline:
firstline: '^\d{{4}}-\d{{2}}-\d{{2}}\s\d{{1,2}}\:\d{{2}}\:\d{{2}}\,\d{{3}}'
max_wait_time: 3s
- regex:
                      expression: '^(?P<timestamp>.*) - (?P<name>\S+) - (?P<level>\S+) - (?P<message>.*)'
- timestamp:
format: 2006-01-02T15:04:05Z00:00
source: timestamp
- labels:
level:
name:
static_configs:
- targets:
- localhost
labels:
job: {cls.__name__}
__path__: {cls._logfile.name}
''').format(**locals())
}
@classmethod
def tearDownClass(cls):
cls._logfile.close()
super(TestLoki, cls).tearDownClass()
def setUp(self):
self.loki_url = self.computer_partition.getConnectionParameterDict(
)['loki-url']
def test_loki_available(self):
self.assertEqual(
requests.codes.ok,
requests.get('{self.loki_url}/ready'.format(**locals()),
verify=False).status_code)
def test_log_ingested(self):
    # create a logger logging to the file that we have
    # configured in the instance parameters.
test_logger = logging.getLogger(self.id())
test_logger.propagate = False
test_logger.setLevel(logging.INFO)
test_handler = logging.FileHandler(filename=self._logfile.name)
test_handler.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
test_logger.addHandler(test_handler)
test_logger.info("testing message")
test_logger.info("testing another message")
test_logger.warning("testing warn")
# log an exception, which will be multi line in log file.
def nested1():
def nested2():
raise ValueError('boom')
nested2()
try:
nested1()
except ValueError:
test_logger.exception("testing exception")
# Check our messages have been ingested
# we retry a few times, because there's a short delay until messages are
# ingested and returned.
for i in range(60):
resp = requests.get(
'{self.loki_url}/api/prom/query?query={{job="TestLoki"}}'.format(
**locals()),
verify=False).json()
if len(resp.get('streams', [])) < 3:
time.sleep(0.5 * i)
continue
warn_stream_list = [stream for stream in resp['streams'] if 'level="WARNING"' in stream['labels']]
self.assertEqual(1, len(warn_stream_list), resp['streams'])
warn_stream, = warn_stream_list
self.assertIn("testing warn", warn_stream['entries'][0]['line'])
info_stream_list = [stream for stream in resp['streams'] if 'level="INFO"' in stream['labels']]
self.assertEqual(1, len(info_stream_list), resp['streams'])
info_stream, = info_stream_list
self.assertTrue(
[
line for line in info_stream['entries']
if "testing message" in line['line']
])
self.assertTrue(
[
line for line in info_stream['entries']
if "testing another message" in line['line']
])
error_stream_list = [stream for stream in resp['streams'] if 'level="ERROR"' in stream['labels']]
self.assertEqual(1, len(error_stream_list), resp['streams'])
error_stream, = error_stream_list
line, = [line['line'] for line in error_stream['entries']]
# this entry is multi-line
self.assertIn('testing exception\nTraceback (most recent call last):\n', line)
self.assertIn('ValueError: boom', line)
    # The labels we have configured are also available
resp = requests.get(
'{self.loki_url}/api/prom/label'.format(**locals()),
verify=False).json()
self.assertIn('level', resp['values'])
self.assertIn('name', resp['values'])
class TestListenInPartition(GrafanaTestCase):
def setUp(self):
with self.slap.instance_supervisor_rpc as supervisor:
all_process_info = supervisor.getAllProcessInfo()
self.process_dict = {
p['name'].replace('-on-watch', ''): psutil.Process(p['pid'])
for p in all_process_info if p['name'] != 'watchdog'
}
def test_grafana_listen(self):
self.assertEqual(
[
c.laddr for c in self.process_dict['grafana'].connections()
if c.status == 'LISTEN'
],
[(self._ipv6_address, 8180)],
)
def test_influxdb_listen(self):
self.assertEqual(
sorted([
c.laddr for c in self.process_dict['influxdb'].connections()
if c.status == 'LISTEN'
]),
[
(self._ipv4_address, 8088),
(self._ipv6_address, 8086),
],
)
  def test_telegraf_listen(self):
self.assertEqual(
[
c.laddr for c in self.process_dict['telegraf'].connections()
if c.status == 'LISTEN'
],
[],
)
def test_loki_listen(self):
self.assertEqual(
sorted([
c.laddr for c in self.process_dict['loki'].connections()
if c.status == 'LISTEN'
]),
[
(self._ipv4_address, 3100),
(self._ipv4_address, 9095),
],
)
def test_promtail_listen(self):
self.assertEqual(
sorted([
c.laddr for c in self.process_dict['promtail'].connections()
if c.status == 'LISTEN'
]),
[
(self._ipv4_address, 19080),
(self._ipv4_address, 19095),
],
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/ 0000775 0000000 0000000 00000000000 14241130220 0027170 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/README.md 0000664 0000000 0000000 00000001464 14241130220 0030454 0 ustar 00root root 0000000 0000000 # Headless Chromium
This software release compiles and runs a headless Chromium shell and
exposes an interface to connect to it remotely from another browser.
After deployment, the instance is configured like this:
```
Caddy frontend
|
(HTTPS, IPv6)
|
Nginx proxy, basic authentication
|
(HTTP, IPv4)
|
Chromium shell
```
The proxy is necessary because Chromium only accepts local connections
for remote debugging.
## Parameters
The following instance parameters can be configured:
- target-url: URL for Chromium to load on startup.
- remote-debugging-port: Port for Chromium to listen on.
- nginx-proxy-port: Port for Nginx proxy to listen on.
- monitor-httpd-port: Port for monitor.
See `instance-headless-chromium-input-schema.json` for default values.
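
## Example

As a sanity check after deployment, the published connection parameters can be
used to query the remote debugging API through the authenticated frontend.
This is a minimal sketch; `frontend_url` and the credentials are placeholders
for the `frontend-url`, `username` and `password` connection parameters:

```python
import requests

# Placeholder connection parameters published by the instance.
frontend_url = "https://example-frontend.invalid"
auth = ("admin", "secret")

# /json lists the pages exposed by the headless shell; each entry carries
# a webSocketDebuggerUrl and a devtoolsFrontendUrl.
for page in requests.get(frontend_url + "/json", auth=auth, verify=False).json():
    print(page.get("devtoolsFrontendUrl"))
```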
buildout.hash.cfg 0000664 0000000 0000000 00000000673 14241130220 0032351 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium [template-cfg]
filename = instance.cfg.in
md5sum = 4d73fe3c5d286163974bdb79c838e030
[instance-headless-chromium]
_update_hash_filename_ = instance-headless-chromium.cfg.in
md5sum = d72e5f6e159081f1c204ceb7ec0c0caf
[template-nginx-conf]
_update_hash_filename_ = templates/nginx.conf.in
md5sum = c4d09d2b819f624087ef4c38551dfe2f
[template-mime-types]
_update_hash_filename_ = templates/mime-types.in
md5sum = 4ef94a7b458d885cd79ba0b930a5727e
instance-headless-chromium-input-schema.json 0000664 0000000 0000000 00000001510 14241130220 0037607 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium {
"type": "object",
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Input Parameters",
"properties": {
"target-url": {
"description": "URL for Chromium to load on startup.",
"title": "Target URL",
"type": "string",
"default": "https://www.example.com"
},
"remote-debugging-port": {
"description": "Port for Chromium to listen on.",
"title": "Remote Debugging Port",
"type": "integer",
"default": 8081
},
"nginx-proxy-port": {
"description": "Port for Nginx proxy to listen on.",
"title": "Nginx Proxy Port",
"type": "integer",
"default": 8082
},
"monitor-httpd-port": {
"description": "Port for monitor frontend.",
"title": "Monitor Httpd Port",
"type": "integer",
"default": 8083
}
}
}
instance-headless-chromium-output-schema.json 0000664 0000000 0000000 00000001700 14241130220 0040011 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium {
"type": "object",
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Values returned by headless Chromium instantiation",
"properties": {
"frontend-url": {
"description": "URL to access remote debugging interface",
"type": "string"
},
"username": {
"description": "Username for remote debugging interface",
"type": "string"
},
"password": {
"description": "Password for remote debugging interface",
"type": "string"
},
"monitor-base-url": {
"description": "Base URL used by monitor",
"type": "string"
},
"monitor-setup-url": {
"description": "One-click link to setup and monitor feeds",
"type": "string"
},
"proxy-url": {
"description": "Raw IPv6 address used by Nginx proxy",
"type": "string"
},
"remote-debug-url": {
"description": "Local IPv4 address used by Chromium",
"type": "string"
}
}
}
instance-headless-chromium.cfg.in 0000664 0000000 0000000 00000014223 14241130220 0035414 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium {% set parameter_dict = dict(default_parameter_dict, **slapparameter_dict) %}
[buildout]
parts =
chromium-launcher
generate-passwd-file
nginx-config
nginx-mime-types
nginx-launcher
logrotate-entry-nginx
publish-connection-information
frontend-ok-promise
frontend-secure-promise
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
extends = {{ parameter_list['template-monitor'] }}
# Create necessary directories.
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
tmp = ${:home}/tmp
log = ${:home}/log
etc = ${:home}/etc
ssl = ${:etc}/ssl
service = ${:etc}/service
# Options for instance configuration. See README.md for a list of
# options that can be configured when requesting an instance.
[headless-chromium]
ipv4 = {{ partition_ipv4 }}
ipv6 = {{ partition_ipv6 }}
remote-debugging-port = {{ parameter_dict['remote-debugging-port'] }}
url = {{ parameter_dict['target-url'] }}
remote-debugging-address = ${:ipv4}:${:remote-debugging-port}
devtools-frontend-root = {{ parameter_list['devtools-frontend'] }}
nginx-port = {{ parameter_dict['nginx-proxy-port'] }}
proxy-address = [${:ipv6}]:${:nginx-port}
nginx-config-target = ${directory:etc}/nginx.conf
nginx-pid-path = ${directory:log}/nginx.pid
nginx-temp-path = ${directory:tmp}
nginx-error-log = ${directory:log}/nginx-error.log
nginx-access-log = ${directory:log}/nginx-access.log
nginx-htpasswd-file = ${directory:etc}/.htpasswd
nginx-key-file = ${frontend-instance-certificate:key-file}
nginx-cert-file = ${frontend-instance-certificate:cert-file}
nginx-mime-types = ${directory:etc}/mime-types
# Create a launcher script in /etc/service for the headless shell
# executable.
[chromium-launcher]
recipe = slapos.recipe.template
inline =
#!/bin/sh
export FONTCONFIG_FILE=${font-config:output}
exec {{ parameter_list['chromium-wrapper'] }} \
--remote-debugging-address=${headless-chromium:ipv4} \
--remote-debugging-port=${headless-chromium:remote-debugging-port} \
${headless-chromium:url}
output = ${directory:service}/chromium
# Configure and launch the proxy server.
[nginx-config]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template-nginx-config'] }}
output = ${headless-chromium:nginx-config-target}
context =
section param_headless_chromium headless-chromium
[nginx-mime-types]
recipe = slapos.recipe.template
url = {{ parameter_list['template-mime-types'] }}
output = ${headless-chromium:nginx-mime-types}
[nginx-launcher]
recipe = slapos.cookbook:wrapper
command-line = {{ parameter_list['nginx-location'] }}/sbin/nginx -c ${headless-chromium:nginx-config-target}
wrapper-path = ${directory:service}/nginx
[logrotate-entry-nginx]
<= logrotate-entry-base
name = nginx
log = ${headless-chromium:nginx-error-log} ${headless-chromium:nginx-access-log}
[frontend-instance-password]
recipe = slapos.cookbook:generate.password
username = admin
bytes = 12
[generate-passwd-file]
recipe = plone.recipe.command
command =
echo -n '${frontend-instance-password:username}:' > ${headless-chromium:nginx-htpasswd-file}
openssl passwd -apr1 '${frontend-instance-password:passwd}' >> ${headless-chromium:nginx-htpasswd-file}
environment =
PATH={{ parameter_list['openssl-location'] }}/bin:%(PATH)s
# Generate a self-signed TLS certificate.
[frontend-instance-certificate]
recipe = plone.recipe.command
command =
if [ ! -e ${:key-file} ]
then
openssl req -x509 -nodes -days 3650 \
-subj "/C=AA/ST=X/L=X/O=Dis/CN=${:common-name}" \
-newkey rsa:1024 -keyout ${:key-file} \
-out ${:cert-file}
openssl x509 -addtrust serverAuth \
-in ${:cert-file} \
-out ${:cert-file}
fi
update-command = ${:command}
key-file = ${directory:ssl}/${:_buildout_section_name_}.key
cert-file = ${directory:ssl}/${:_buildout_section_name_}.cert
common-name = ${headless-chromium:ipv6}
environment =
PATH={{ parameter_list['openssl-location'] }}/bin:%(PATH)s
# Generate a fonts.conf file.
[font-config]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template-fonts-conf'] }}
output = ${directory:etc}/fonts.conf
context =
key cachedir :cache-dir
key fonts :fonts
key includes :includes
cache-dir =
${directory:etc}/.fontconfig.cache
fonts =
{{ parameter_list['liberation-fonts-location'] }}
includes =
{{ parameter_list['fontconfig-location'] }}/etc/fonts/conf.d
[publish-connection-information]
recipe = slapos.cookbook:publish
<= monitor-publish
remote-debug-url = http://${headless-chromium:remote-debugging-address}
proxy-url = https://${headless-chromium:proxy-address}
frontend-url = ${remote-debugging-frontend:connection-secure_access}
username = ${frontend-instance-password:username}
password = ${frontend-instance-password:passwd}
# Request a frontend URL from the CDN for the remote debugging interface.
[remote-debugging-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Headless Chromium Remote Debugging Frontend
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
slave = true
config-url = https://${headless-chromium:proxy-address}
config-https-only = true
config-type = websocket
config-websocket-path-list = /devtools
return = domain secure_access
# Monitoring: check that the Chromium process is alive and responding to
# requests through the proxy.
[monitor-instance-parameter]
monitor-httpd-port = {{ parameter_dict['monitor-httpd-port'] }}
# Promise to make sure the remote debugging frontend returns 200 when
# queried with the correct credentials.
[frontend-ok-promise]
<= monitor-promise-base
promise = check_url_available
name = headless-chromium-frontend-ok.py
url = ${remote-debugging-frontend:connection-secure_access}
config-url = ${:url}
config-username = ${frontend-instance-password:username}
config-password = ${frontend-instance-password:passwd}
# Promise to make sure that the remote debugging frontend returns 401
# when queried with no credentials.
[frontend-secure-promise]
<= monitor-promise-base
promise = check_url_available
name = headless-chromium-frontend-secure.py
url = ${remote-debugging-frontend:connection-secure_access}
config-url = ${:url}
config-http-code = 401
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/instance.cfg.in 0000664 0000000 0000000 00000003331 14241130220 0032062 0 ustar 00root root 0000000 0000000 [buildout]
parts =
switch-softwaretype
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
[profile-common]
openssl-location = {{ openssl_location }}
nginx-location = {{ nginx_location }}
liberation-fonts-location = {{ liberation_fonts_location }}
fontconfig-location = {{ fontconfig_location }}
chromium-wrapper = {{ chromium_wrapper }}
devtools-frontend = {{ devtools_frontend }}
template-nginx-config = {{ template_nginx_config_target }}
template-fonts-conf = {{ template_fonts_conf_target }}
template-monitor = {{ template_monitor }}
template-mime-types = {{ template_mime_types_target }}
[instance-headless-chromium]
recipe = slapos.recipe.template:jinja2
url = {{ template_instance_headless_chromium_target }}
output = ${buildout:directory}/${:filename}
filename = instance-headless-chromium.cfg
context =
section buildout buildout
section parameter_list profile-common
key partition_ipv4 slap-configuration:ipv4-random
key partition_ipv6 slap-configuration:ipv6-random
key slapparameter_dict slap-configuration:configuration
jsonkey default_parameter_dict :default-parameters
default-parameters =
{
"remote-debugging-port": 8081,
"nginx-proxy-port": 8082,
"target-url": "https://www.example.com",
"monitor-httpd-port": 8083
}
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
RootSoftwareInstance = ${:default}
default = instance-headless-chromium:output
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/software.cfg 0000664 0000000 0000000 00000002600 14241130220 0031501 0 ustar 00root root 0000000 0000000 [buildout]
extends =
buildout.hash.cfg
../../stack/slapos.cfg
../../stack/monitor/buildout.cfg
../../component/headless-chromium/buildout.cfg
../../component/openssl/buildout.cfg
../../component/nginx/buildout.cfg
../../component/fonts/buildout.cfg
../../component/fontconfig/buildout.cfg
parts =
slapos-cookbook
template-cfg
[python]
part = python3
[template-cfg]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/template.cfg
url = ${:_profile_base_location_}/${:filename}
context =
section buildout buildout
key openssl_location openssl:location
key nginx_location nginx:location
key liberation_fonts_location liberation-fonts:location
key fontconfig_location fontconfig:location
key chromium_wrapper headless-chromium-wrapper:output
key devtools_frontend headless-chromium:devtools-frontend
key template_nginx_config_target template-nginx-conf:target
key template_mime_types_target template-mime-types:target
key template_fonts_conf_target template-fonts-conf:output
key template_instance_headless_chromium_target instance-headless-chromium:target
key template_monitor monitor2-template:output
[download-base]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
[instance-headless-chromium]
<= download-base
[template-nginx-conf]
<= download-base
[template-mime-types]
<= download-base
software.cfg.json 0000664 0000000 0000000 00000000612 14241130220 0032373 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium {
"name": "Headless Chromium",
"description": "Headless (stripped-down) Chromium shell",
"serialisation": "xml",
"software-type": {
"default": {
"title": "Default",
"description": "Standalone headless shell",
"request": "instance-headless-chromium-input-schema.json",
"response": "instance-headless-chromium-output-schema.json",
"index": 0
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/templates/ 0000775 0000000 0000000 00000000000 14241130220 0031166 5 ustar 00root root 0000000 0000000 mime-types.in 0000664 0000000 0000000 00000004013 14241130220 0033526 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/templates types {
text/html html htm shtml;
text/css css;
text/xml xml rss;
image/gif gif;
image/jpeg jpeg jpg;
application/x-javascript js;
application/atom+xml atom;
text/mathml mml;
text/plain txt;
text/vnd.sun.j2me.app-descriptor jad;
text/vnd.wap.wml wml;
text/x-component htc;
image/png png;
image/tiff tif tiff;
image/vnd.wap.wbmp wbmp;
image/x-icon ico;
image/x-jng jng;
image/x-ms-bmp bmp;
image/svg+xml svg svgz;
application/java-archive jar war ear;
application/mac-binhex40 hqx;
application/msword doc;
application/pdf pdf;
application/postscript ps eps ai;
application/rtf rtf;
application/vnd.ms-excel xls;
application/vnd.ms-powerpoint ppt;
application/vnd.wap.wmlc wmlc;
application/vnd.google-earth.kml+xml kml;
application/vnd.google-earth.kmz kmz;
application/x-7z-compressed 7z;
application/x-cocoa cco;
application/x-java-archive-diff jardiff;
application/x-java-jnlp-file jnlp;
application/x-makeself run;
application/x-perl pl pm;
application/x-pilot prc pdb;
application/x-rar-compressed rar;
application/x-redhat-package-manager rpm;
application/x-sea sea;
application/x-shockwave-flash swf;
application/x-stuffit sit;
application/x-tcl tcl tk;
application/x-x509-ca-cert der pem crt;
application/x-xpinstall xpi;
application/xhtml+xml xhtml;
application/zip zip;
application/octet-stream bin exe dll;
application/octet-stream deb;
application/octet-stream dmg;
application/octet-stream eot;
application/octet-stream iso img;
application/octet-stream msi msp msm;
application/ogg ogx;
audio/midi mid midi kar;
audio/mpeg mpga mpega mp2 mp3 m4a;
audio/ogg oga ogg spx;
audio/x-realaudio ra;
audio/webm weba;
video/3gpp 3gpp 3gp;
video/mp4 mp4;
video/mpeg mpeg mpg mpe;
video/ogg ogv;
video/quicktime mov;
video/webm webm;
video/x-flv flv;
video/x-mng mng;
video/x-ms-asf asx asf;
video/x-ms-wmv wmv;
video/x-msvideo avi;
}
nginx.conf.in 0000664 0000000 0000000 00000006230 14241130220 0033507 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/templates pid {{ param_headless_chromium['nginx-pid-path'] }};
error_log {{ param_headless_chromium['nginx-error-log'] }};
events {
worker_connections 1024;
}
http {
access_log {{ param_headless_chromium['nginx-access-log'] }};
include {{ param_headless_chromium['nginx-mime-types'] }};
default_type application/octet-stream;
types {
text/html html;
text/css css;
application/javascript js;
}
server {
listen {{ param_headless_chromium['proxy-address'] }} ssl;
# Require username/password to access remote debugging port.
auth_basic "Remote Debugging";
auth_basic_user_file {{ param_headless_chromium['nginx-htpasswd-file'] }};
# Use self-signed SSL certificate.
ssl_certificate {{ param_headless_chromium['nginx-cert-file'] }};
ssl_certificate_key {{ param_headless_chromium['nginx-key-file'] }};
client_body_temp_path {{ param_headless_chromium['nginx-temp-path'] }};
proxy_temp_path {{ param_headless_chromium['nginx-temp-path'] }};
fastcgi_temp_path {{ param_headless_chromium['nginx-temp-path'] }};
uwsgi_temp_path {{ param_headless_chromium['nginx-temp-path'] }};
scgi_temp_path {{ param_headless_chromium['nginx-temp-path'] }};
# All websocket connections are served from /devtools.
location /devtools {
proxy_http_version 1.1;
proxy_set_header Host {{ param_headless_chromium['remote-debugging-address'] }};
proxy_pass http://{{ param_headless_chromium['remote-debugging-address'] }};
proxy_set_header Upgrade "websocket";
proxy_set_header Connection "Upgrade";
}
# The DevTools frontend is served from /serve_file/@{version_hash}.
location ~ "^\/serve_file\/@[0-9a-f]{5,40}\/(.*)" {
alias {{ param_headless_chromium['devtools-frontend-root'] }}/$1;
}
location / {
proxy_http_version 1.1;
      # The proxy must set the Host header to an IP address, since the
      # headless Chromium shell rejects requests with any other Host
      # value, for security reasons.
# See https://bugs.chromium.org/p/chromium/issues/detail?id=813540.
proxy_set_header Host {{ param_headless_chromium['remote-debugging-address'] }};
proxy_pass http://{{ param_headless_chromium['remote-debugging-address'] }};
# The browser security policy will prevent us from loading the
# Websocket connection without TLS, so we have to go through the
# frontend CDN URL. The tricky thing is that the frontend URL is
# not available yet when this file is built; what we do instead is
# use the given Host header.
sub_filter "ws={{ param_headless_chromium['remote-debugging-address'] }}" "wss=$host";
sub_filter_once on;
sub_filter_types application/json;
sub_filter "ws://{{ param_headless_chromium['remote-debugging-address'] }}" "wss://$host";
sub_filter_types application/json;
# We want to use our own DevTools frontend rather than
# https://chrome-devtools-frontend.appspot.com. There should be a
# --custom-devtools-frontend flag for Chromium, but it doesn't
# seem to work with the remote debugging port.
sub_filter "chrome-devtools-frontend.appspot.com" "$host";
sub_filter_types *;
}
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/test/ 0000775 0000000 0000000 00000000000 14241130220 0030147 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/test/README.md 0000664 0000000 0000000 00000000055 14241130220 0031426 0 ustar 00root root 0000000 0000000 Tests for headless Chromium software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/test/setup.py 0000664 0000000 0000000 00000003631 14241130220 0031664 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2021 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.headless-chromium'
long_description = open("README.md").read()
setup(
name=name,
version=version,
description="Test for SlapOS headless Chromium",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'requests',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/headless-chromium/test/test.py 0000664 0000000 0000000 00000005720 14241130220 0031504 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2021 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '../software.cfg')))
class TestHeadlessChromium(SlapOSInstanceTestCase):
def setUp(self):
self.connection_parameters = self.requestDefaultInstance().getConnectionParameterDict()
def test_remote_debugging_port(self):
# The headless browser should respond at /json with a nonempty list
# of available pages, each of which has a webSocketDebuggerUrl and a
# devtoolsFrontendUrl.
url = self.connection_parameters['remote-debug-url']
response = requests.get('%s/json' % url)
# Check that request was successful and the response was a nonempty
# list.
self.assertEqual(requests.codes['ok'], response.status_code)
self.assertTrue(len(response.json()) > 0)
# Check that the first page has the correct fields.
first_page = response.json()[0]
self.assertIn('webSocketDebuggerUrl', first_page)
self.assertIn('devtoolsFrontendUrl', first_page)
def test_devtools_frontend_ok(self):
# The proxy should serve the DevTools frontend from
    # /serve_file/@{hash}/inspector.html, where {hash} is 5 to 40
    # hexadecimal characters (see the location regex in nginx.conf.in).
proxyURL = self.connection_parameters['proxy-url']
username = self.connection_parameters['username']
password = self.connection_parameters['password']
frontend = '/serve_file/@aaaaa/inspector.html'
response = requests.get(proxyURL + frontend, verify=False,
auth=(username, password))
self.assertEqual(requests.codes['ok'], response.status_code)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/hellorina/ 0000775 0000000 0000000 00000000000 14241130220 0025534 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/hellorina/instance-root.cfg.in 0000664 0000000 0000000 00000003150 14241130220 0031406 0 ustar 00root root 0000000 0000000 {% set sla_dict = {} -%}
{% for sla, ref_list in slapparameter_dict.get('sla-dict', {}).iteritems() -%}
{% do sla_dict.update(dict.fromkeys(ref_list, sla)) -%}
{% endfor -%}
{% macro sla(name, required=False) -%}
{% if required or name in sla_dict -%}
{% for k, (v,) in urlparse.parse_qs(sla_dict.pop(name), strict_parsing=1).iteritems() -%}
sla-{{ k }} = {{ v }}
{% endfor -%}
{% else -%}
sla-computer_guid = ${slap-connection:computer-id}
{% endif -%}
{% endmacro -%}
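{#- Hypothetical example of the "sla-dict" instance parameter consumed above:
    {"computer_guid=COMP-1234": ["server"]}
    i.e. a mapping from a query string of SLA criteria to the list of
    request names it applies to. -#}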
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
# Executables put here will be started but not monitored (for startup scripts)
script = ${:etc}/run
# Executables put here will be started and monitored (for daemons)
service = ${:etc}/service
[server]
<= request-common-base
software-type = server
name = server
{{ sla('server') }}
return =
[proxy]
recipe = slapos.cookbook:wrapper
command-line = {{ rina_proxy }} ${server:instance-guid} ${:ipv6} ${:port}
wrapper-path = ${directory:service}/proxy
environment =
PATH={{ rina_tools_location }}/bin:%(PATH)s
ipv6 = {{ipv6}}
port = 8080
[publish]
recipe = slapos.cookbook:publish.serialised
url.proxy = http://[${proxy:ipv6}]:${proxy:port}
[request-common-base]
recipe = slapos.cookbook:request.serialised
software-url = ${slap-connection:software-release-url}
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
[buildout]
parts =
publish
{{- assert(not sla_dict, sla_dict) }}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/hellorina/instance-server.cfg.in 0000664 0000000 0000000 00000000551 14241130220 0031733 0 ustar 00root root 0000000 0000000 [directory]
recipe = slapos.cookbook:mkdirectory
service = ${buildout:directory}/etc/service
[server]
recipe = slapos.cookbook:wrapper
command-line = {{rina_tools_location}}/bin/rina-echo-time -l --server-api {{instance_guid}}
wrapper-path = ${directory:service}/server
[publish]
recipe = slapos.cookbook:publish.serialised
[buildout]
parts = publish server
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/hellorina/instance.cfg.in 0000664 0000000 0000000 00000002430 14241130220 0030425 0 ustar 00root root 0000000 0000000 [buildout]
parts = switch-softwaretype
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
filename = ${:_buildout_section_name_}.cfg
output = ${buildout:parts-directory}/${:_buildout_section_name_}/${:filename}
extensions = jinja2.ext.do
extra-context =
context =
key slapparameter_dict slap-configuration:configuration
raw rina_tools_location {{ rina_tools_location }}
${:extra-context}
[root]
<= jinja2-template-base
url = {{ instance_root }}
extra-context =
import urlparse urlparse
key ipv6 slap-configuration:ipv6-random
raw rina_proxy {{ rina_proxy }}
[server]
<= jinja2-template-base
url = {{ instance_server }}
extra-context =
key instance_guid slap-configuration:instance-guid
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
override = {{ dumps(override_switch_softwaretype |default) }}
default = root:output
# BBB
RootSoftwareInstance = ${:default}
server = server:output
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/hellorina/proxy 0000775 0000000 0000000 00000004251 14241130220 0026645 0 ustar 00root root 0000000 0000000 #!/usr/bin/python
import httplib, socket, subprocess, sys, threading, urlparse
import BaseHTTPServer, SocketServer
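# HTTP frontend for rina-echo-time: GET / redirects to /ping?count=4, and
# GET /ping?count=N streams the output of
# "rina-echo-time --server-api <server-api> -c N" back to the client.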
class Popen(subprocess.Popen):
def stop(self):
if self.pid and self.returncode is None:
self.terminate()
t = threading.Timer(5, self.kill)
t.start()
# PY3: use waitid(WNOWAIT) and call self.poll() after t.cancel()
r = self.wait()
t.cancel()
return r
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
try:
path, query = self.path.split('?', 1)
except ValueError:
path = self.path
query = {}
else:
query = dict(urlparse.parse_qsl(query,
keep_blank_values=1, strict_parsing=1))
_, path = path.split('/')
if not _:
try:
return getattr(self, '_GET_' + path, None)(**query)
except (AttributeError, TypeError):
pass
self.send_error(httplib.BAD_REQUEST)
def _GET_(self):
self.send_response(httplib.FOUND)
self.send_header('Location', 'ping?count=4')
self.end_headers()
def _GET_ping(self, count=4):
count = int(count)
args = "rina-echo-time", "--server-api", server_api, "-c", str(count)
try:
p = Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except Exception, e:
self.send_error(httplib.INTERNAL_SERVER_ERROR)
raise
try:
self.send_response(httplib.OK)
self.send_header('Content-type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write('# count=%s\n' % count)
while 1:
line = p.stdout.readline()
if not line:
break
self.wfile.write(line)
finally:
p.stop()
class Server(SocketServer.ThreadingTCPServer):
allow_reuse_address = True
daemon_threads = True
address_family = socket.AF_INET6
if __name__ == "__main__":
server_api = sys.argv[1]
Server((sys.argv[2], int(sys.argv[3])), Handler).serve_forever()
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/hellorina/software.cfg 0000664 0000000 0000000 00000003262 14241130220 0030052 0 ustar 00root root 0000000 0000000 [buildout]
extends =
../../stack/slapos.cfg
../../component/rina-tools/buildout.cfg
parts =
slapos-cookbook
template
[file]
# For old GCC like 4.9.2 on Debian 8.
# XXX: This should be moved to component/rina-tools/buildout.cfg, next to where
# we force use of system GCC. However, our buildout patches are still not
# perfect concerning the processing of +=
environment +=
CFLAGS=-std=c99 -g -O2
[template]
recipe = slapos.recipe.template:jinja2
url = ${:_profile_base_location_}/instance.cfg.in
md5sum = d7506e861ef87977eaa554b8928d2c99
# XXX: "template.cfg" is hardcoded in instanciation recipe
output = ${buildout:directory}/template.cfg
context =
key develop_eggs_directory buildout:develop-eggs-directory
key eggs_directory buildout:eggs-directory
key rina_tools_location rina-tools:location
key instance_root instance-root:target
key instance_server instance-server:target
key rina_proxy proxy:location
[download-base]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_buildout_section_name_}.cfg.in
[instance-root]
<= download-base
md5sum = 1c0e222aab51dfc598094e972f1d1482
[instance-server]
<= download-base
md5sum = 88a451b0f7f8def12713b92b91659b98
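# Download the proxy script and install it with the current Python
# interpreter as its shebang (see the "install" code below).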
[proxy]
recipe = slapos.recipe.build
location = ${buildout:bin-directory}/${:_buildout_section_name_}
url = ${:_profile_base_location_}/${:_buildout_section_name_}
md5sum = 78b77a6bda9958f547f7d89b747731e3
install =
import os, sys
with open(self.download(options['url'], options['md5sum'])) as src, \
open(options['location'], 'w') as dst:
os.fchmod(dst.fileno(), 0o755)
src.readline()
dst.write('#!%s\n' % sys.executable)
dst.write(src.read())
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/ 0000775 0000000 0000000 00000000000 14241130220 0025732 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/buildout.hash.cfg 0000664 0000000 0000000 00000001433 14241130220 0031165 0 ustar 00root root 0000000 0000000 # THIS IS NOT A BUILDOUT FILE, despite purposedly using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance-profile]
filename = instance.cfg.in
md5sum = 483b76d8e6bf72d72a38a3f7bf66fe08
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/instance.cfg.in 0000664 0000000 0000000 00000011126 14241130220 0030625 0 ustar 00root root 0000000 0000000 #############################
#
# Deploy hello-world instance
#
#############################
[buildout]
parts =
directory
publish-connection-parameter
# Define egg directories to be the one from Software Release
# (/opt/slapgrid/...)
# Always the same.
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
extends = {{ template_monitor }}
[instance-parameter]
# Fetch arbitrary parameters defined by the user in SlapOS Master for his instance.
# We use the slapconfiguration recipe with a few parameters (partition id,
# computer id, certificate, etc).
# It will then authenticate to SlapOS Master and fetch the instance parameters.
# The parameters are accessible from ${instance-parameter:configuration.name-of-parameter}
# Always the same. Just copy/paste.
# See docstring of slapos.cookbook:slapconfiguration for more information.
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
# Define default parameter(s) that will be used later, in case user didn't
# specify it.
# All possible parameters should have a default.
# In our use case, we expect the user to specify one (optional) parameter: "name". We set the default value here in case it is not provided, so that instantiation does not crash.
configuration.name = John Doe
# If our use case requires letting the user specify a mail address so that the instance can send mail (for example), we can add:
# configuration.mail-address =
# If the user doesn't specify it, nothing breaks and the recipe can handle the missing value (e.g. by not sending any mail).
# Create all needed directories, depending on your needs
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
# Executables put here will be started but not monitored (for startup scripts)
script = ${:etc}/run/
# Executables put here will be started and monitored (for daemons)
service = ${:etc}/service
# Path of the log directory used by our service (see [helloweb])
log = ${:var}/log
# Create a simple web server that says "hello <name>" to the web.
[helloweb]
# helloworld service is listening on:
# - global IPv6 address, and
# - fixed port
#
# NOTE because every computer partition is allocated its own global IPv6
# address, it is ok to fix the port - different hello-world instances will have
# different IPv6 addresses and they all will be accessible at the same time.
ipv6 = ${instance-parameter:ipv6-random}
# full URL - for convenience
url = http://[${:ipv6}]:${:port}
# the service will log here
logfile = ${directory:log}/helloweb-${:kind}.log
# Actual script that starts the service:
# This recipe will try to "exec" the command-line after separating parameters.
recipe = slapos.cookbook:wrapper
command-line =
{{ buildout['bin-directory'] }}/helloweb-${:kind} --logfile ${:logfile}
${:ipv6} ${:port} ${instance-parameter:configuration.name}
# Put this shell script in the "etc/service" directory. Each executable in this
# directory will be started and monitored by supervisord. If a service
# exits/crashes, it will trigger a "bang" and cause a re-run of the instance.
wrapper-path = ${directory:service}/helloweb-${:kind}
# promise, that checks that helloweb service is alive
[helloweb-promise]
<= monitor-promise-base
promise = check_socket_listening
name = helloweb-${:kind}.py
{# macro to instantiate service of `kind` to listen on `port` #}
{% set service_list = [] %}
{% macro hellowebsrv(kind, port) %}
{% do service_list.append(kind) %}
[helloweb-{{ kind }}]
<= helloweb
kind = {{ kind }}
port = {{ port }}
[helloweb-{{ kind }}-promise]
<= helloweb-promise
kind = {{ kind }}
config-host = ${helloweb-{{ kind }}:ipv6}
config-port = {{ port }}
{% endmacro %}
# services instantiation
{{ hellowebsrv('python', 7777) }}
{{ hellowebsrv('ruby', 7778) }}
{{ hellowebsrv('go', 7779) }}
# register all services/promises to buildout parts
[buildout]
parts +=
{%- for kind in service_list %}
helloweb-{{ kind }}
helloweb-{{ kind }}-promise
{%- endfor %}
# Publish all the parameters needed for the user to connect to the instance.
# It can be anything: URL(s), password(s), or arbitrary parameters.
# Here we'll just echo back the entered name as instance parameter
[publish-connection-parameter]
recipe = slapos.cookbook:publish
name = Hello ${instance-parameter:configuration.name}!
{%- for kind in service_list %}
url.{{ kind }} = ${helloweb-{{ kind }}:url}
{%- endfor %}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/software.cfg 0000664 0000000 0000000 00000003430 14241130220 0030245 0 ustar 00root root 0000000 0000000 [buildout]
extends =
# buildout.hash.cfg is used for automated hash calculation of managed
# instance files by calling update-hash
buildout.hash.cfg
# "slapos" stack describes basic things needed for 99.9% of SlapOS Software
# Releases
../../stack/slapos.cfg
../../stack/monitor/buildout.cfg
# Extend here component profiles, like openssl, apache, mariadb, curl...
# Or/and extend a stack (lamp, tomcat) that does most of the work for you
# In this example we extend from helloweb component.
# ../../component/component1/buildout.cfg
# ../../component/component2/buildout.cfg
../../component/helloweb/buildout.cfg
parts =
# Call installation of slapos.cookbook egg defined in stack/slapos.cfg (needed
# in 99,9% of Slapos Software Releases)
slapos-cookbook
# Call creation of instance.cfg file that will be called for deployment of
# instance
instance-profile
# build helloweb programs
helloweb-python
helloweb-ruby
helloweb-go
[python]
part = python3
[gowork]
golang = ${golang1.17:location}
# Macro for jinja templates. The filename is set in buildout.hash.cfg
# in the section using this template
[jinja-template]
recipe = slapos.recipe.template:jinja2
url = ${:_profile_base_location_}/${:filename}
# Download instance.cfg.in (buildout profile used to deployment of instance),
# replace all ${foo:bar} parameters by real values, and change $${foo:bar} to
# ${foo:bar}
# The recipe, template and mode are fetched from jijna-template
[instance-profile]
# The <= is buildout verb to use defined template
<= jinja-template
output = ${buildout:directory}/instance.cfg
extensions = jinja2.ext.do
context =
section buildout buildout
raw template_monitor ${monitor2-template:output}
# md5sum is fetched from buildout.hash.cfg and can be recalculated automatically by
# calling update-hash
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/test/ 0000775 0000000 0000000 00000000000 14241130220 0026711 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/test/README.md 0000664 0000000 0000000 00000000046 14241130220 0030170 0 ustar 00root root 0000000 0000000 Tests for HelloWorld software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/test/setup.py 0000664 0000000 0000000 00000003660 14241130220 0030430 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.helloworld'
with open("README.md") as f:
long_description = f.read()
setup(
name=name,
version=version,
description="Test for SlapOS' helloworld",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'requests',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/helloworld/test/test.py 0000664 0000000 0000000 00000004653 14241130220 0030252 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class HelloWorldTestCase(SlapOSInstanceTestCase):
# to be defined by subclasses
name = None
kind = None
@classmethod
def getInstanceParameterDict(cls):
return {"name": cls.name}
class HTTPRequestTestMixin(object):
"""Test that the service url.${kind} responds Hello ${name}
"""
def test_get(self):
url = self.computer_partition.getConnectionParameterDict()['url.{}'.format(
self.kind)]
response = requests.get(url)
self.assertEqual(requests.codes['OK'], response.status_code)
self.assertTrue(
response.text.startswith("Hello {}".format(self.name)), response.text)
class TestPython(HelloWorldTestCase, HTTPRequestTestMixin):
name = "Python"
kind = "python"
class TestRuby(HelloWorldTestCase, HTTPRequestTestMixin):
name = "Ruby"
kind = "ruby"
class TestGolang(HelloWorldTestCase, HTTPRequestTestMixin):
name = "Go"
kind = "go"
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/ 0000775 0000000 0000000 00000000000 14241130220 0026044 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/README.md 0000664 0000000 0000000 00000000746 14241130220 0027332 0 ustar 00root root 0000000 0000000 # Html5 Application Server #
## Presentation ##
* Fast hosting software for static websites (HTML5)
* Uses the Nginx server
## Parameter ##
download_url (string): required
Details:
* Only tarballs (tar) are supported
* gzip compression is supported (optional)
* The tarball must contain an index.html at its root
## How it works ##
Each time you (re)start your instance or update its parameters, html5as removes the previous website, then downloads the tarball and extracts it into the docroot directory.
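## Example request ##
A minimal sketch, assuming a configured `slapos` command-line client; the software release URL is a placeholder and the exact CLI syntax may vary between slapos.core versions:

```bash
# Request an instance of this release; its address is published back
# as the "server_url" connection parameter.
slapos request my-html5as https://example.org/html5as-base/software.cfg \
  --parameters download_url=https://example.org/site.tar.gz
```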
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/instance.cfg.in 0000664 0000000 0000000 00000002153 14241130220 0030737 0 ustar 00root root 0000000 0000000 [buildout]
parts =
switch-softwaretype
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
[profile-common]
nginx_location = {{ nginx_location }}
dash_location = {{ dash_location }}
template_nginx_conf = {{ template_nginx_conf_target }}
template_mime_types = {{ template_mime_types_target }}
template_launcher = {{ template_launcher_target }}
[instance-html5as]
recipe = slapos.recipe.template:jinja2
url = {{ template_instance_html5as_target }}
output = ${buildout:directory}/${:filename}
filename = instance-html5as.cfg
context =
section buildout buildout
section parameter_list profile-common
# partition_ipv6 is the random ipv6 allocated to the local partition
key partition_ipv6 slap-configuration:ipv6-random
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
RootSoftwareInstance = ${:default}
default = instance-html5as:output
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
instance_html5as.cfg.in 0000664 0000000 0000000 00000006565 14241130220 0032330 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base #############################
#
# Deploy html5as instance
#
#############################
[buildout]
parts =
nginx_conf
downloader
mime_types
launcher
publish-connection-information
# Define egg directories to be the one from Software Release
# (/opt/slapgrid/...)
# Always the same.
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
# partition tree
# /
# |- etc/
# | |- nginx.conf
# | |- run/
# | |- html5as (binary)
# |- var/
# | |- run/
# | | |- nginx.pid
# | |- log/
# | | |- nginx.log
# | | |- nginx.access.log
# |- srv/
# | |- html5as/ (doc root)
# | | |- index.html
# | |- backup/
# Create all needed directories, depending on your needs
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
srv = ${:home}/srv
[basedirectory]
recipe = slapos.cookbook:mkdirectory
# Executables put here will be started but not monitored (for startup scripts)
script = ${directory:etc}/run
# Executables put here will be started and monitored (for daemons)
service = ${directory:etc}/service
log = ${directory:var}/log
run = ${directory:var}/run
backup = ${directory:srv}/backup
data = ${directory:srv}/html5as
[tempdirectory]
recipe = slapos.cookbook:mkdirectory
tmp = ${directory:home}/tmp
client_body_temp_path = ${:tmp}/client_body_temp_path
proxy_temp_path = ${:tmp}/proxy_temp_path
fastcgi_temp_path = ${:tmp}/fastcgi_temp_path
uwsgi_temp_path = ${:tmp}/uwsgi_temp_path
scgi_temp_path = ${:tmp}/scgi_temp_path
# List of options for html5as configuration
# It will run a simple nginx serving the content of srv/html5as
[html5as]
# Options
nb_workers = 2
# Network
ip = {{ partition_ipv6 }}
port = 8081
access_url = http://[${:ip}]:${:port}
# Paths
# Log
path_pid = ${basedirectory:run}/nginx.pid
path_log = ${basedirectory:log}/nginx.log
path_access_log = ${basedirectory:log}/nginx.access.log
path_error_log = ${basedirectory:log}/nginx.error.log
path_tmp = ${tempdirectory:tmp}
# Docroot
docroot = ${basedirectory:data}
default_index = ${basedirectory:data}/index.html
# Config files
path_nginx_conf = ${directory:etc}/nginx.conf
path_mime_types = ${directory:etc}/mime_types
# Binaries
path_shell = {{ parameter_list['dash_location'] }}/bin/dash
# Executables
bin_launcher = ${basedirectory:script}/launcher
# Utils
path_nginx = {{ parameter_list['nginx_location'] }}/sbin/nginx
# Render nginx conf
[nginx_conf]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template_nginx_conf'] }}
output = ${html5as:path_nginx_conf}
context =
section param_html5as html5as
section param_tempdir tempdirectory
# Render necessary mime types file for nginx
[mime_types]
recipe = slapos.recipe.template
url = {{ parameter_list['template_mime_types'] }}
output = ${html5as:path_mime_types}
# Render the script launching nginx
[launcher]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template_launcher'] }}
output = ${html5as:bin_launcher}
context =
section param_html5as html5as
# Simple command to put content in the docroot
[downloader]
recipe = plone.recipe.command
command = rm -r ${html5as:docroot}/*; echo "Hello World!" > ${html5as:docroot}/index.html
# Publish nginx address
[publish-connection-information]
recipe = slapos.cookbook:publish
server_url = ${html5as:access_url}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/software.cfg 0000664 0000000 0000000 00000004545 14241130220 0030367 0 ustar 00root root 0000000 0000000 [buildout]
extends =
# "slapos" stack describes basic things needed for 99.9% of SlapOS Software
# Releases
../../stack/slapos.cfg
# Extend here component profiles, like openssl, apache, mariadb, curl...
# Or/and extend a stack (lamp, tomcat) that does most of the work for you
# In this example we extend needed components for html5as.
../../component/nginx/buildout.cfg
../../component/dash/buildout.cfg
parts =
# Call installation of slapos.cookbook egg defined in stack/slapos.cfg (needed
# in 99.9% of SlapOS Software Releases)
slapos-cookbook
# Call creation of instance.cfg file that will be called for deployment of
# instance
template-cfg
# Add extra egg
extra-eggs
# Download instance.cfg.in (the buildout profile used to deploy the instance),
# replacing all {{ foo_bar }} parameters with real values.
# The recipe, template and mode are fetched from the jinja2-template recipe
[template-cfg]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/template.cfg
url = ${:_profile_base_location_}/${:filename}
filename = instance.cfg.in
md5sum = 861e7ce5d65252067d31c7325c97917d
context =
section buildout buildout
key nginx_location nginx:location
key dash_location dash:location
key template_nginx_conf_target template_nginx_conf:target
key template_mime_types_target template_mime_types:target
key template_launcher_target template_launcher:target
key template_instance_html5as_target instance_html5as:target
# Download instance_html5as.cfg.in
[instance_html5as]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
_update_hash_filename_ = instance_html5as.cfg.in
md5sum = 2c2bcd723694bc88df13c139c96e8bad
[template_nginx_conf]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
_update_hash_filename_ = templates/nginx_conf.in
md5sum = 61dc4c82bf48563228ce4dea6c5c6319
[template_launcher]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
_update_hash_filename_ = templates/launcher.in
md5sum = 6cb0d64905ae7fc67277c1bf76b86875
[template_mime_types]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
_update_hash_filename_ = templates/mime_types.in
md5sum = 4ef94a7b458d885cd79ba0b930a5727e
[extra-eggs]
recipe = zc.recipe.egg
eggs =
plone.recipe.command
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/templates/ 0000775 0000000 0000000 00000000000 14241130220 0030042 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/templates/launcher.in0000664 0000000 0000000 00000000341 14241130220 0032171 0 ustar 00root root 0000000 0000000 #! {{ param_html5as['path_shell'] }}
# BEWARE: This file is operated by slapos node
# BEWARE: It will be overwritten automatically
# Run nginx
exec {{ param_html5as['path_nginx'] }} -c {{ param_html5as['path_nginx_conf'] }}
mime_types.in 0000664 0000000 0000000 00000004013 14241130220 0032464 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/templates types {
text/html html htm shtml;
text/css css;
text/xml xml rss;
image/gif gif;
image/jpeg jpeg jpg;
application/x-javascript js;
application/atom+xml atom;
text/mathml mml;
text/plain txt;
text/vnd.sun.j2me.app-descriptor jad;
text/vnd.wap.wml wml;
text/x-component htc;
image/png png;
image/tiff tif tiff;
image/vnd.wap.wbmp wbmp;
image/x-icon ico;
image/x-jng jng;
image/x-ms-bmp bmp;
image/svg+xml svg svgz;
application/java-archive jar war ear;
application/mac-binhex40 hqx;
application/msword doc;
application/pdf pdf;
application/postscript ps eps ai;
application/rtf rtf;
application/vnd.ms-excel xls;
application/vnd.ms-powerpoint ppt;
application/vnd.wap.wmlc wmlc;
application/vnd.google-earth.kml+xml kml;
application/vnd.google-earth.kmz kmz;
application/x-7z-compressed 7z;
application/x-cocoa cco;
application/x-java-archive-diff jardiff;
application/x-java-jnlp-file jnlp;
application/x-makeself run;
application/x-perl pl pm;
application/x-pilot prc pdb;
application/x-rar-compressed rar;
application/x-redhat-package-manager rpm;
application/x-sea sea;
application/x-shockwave-flash swf;
application/x-stuffit sit;
application/x-tcl tcl tk;
application/x-x509-ca-cert der pem crt;
application/x-xpinstall xpi;
application/xhtml+xml xhtml;
application/zip zip;
application/octet-stream bin exe dll;
application/octet-stream deb;
application/octet-stream dmg;
application/octet-stream eot;
application/octet-stream iso img;
application/octet-stream msi msp msm;
application/ogg ogx;
audio/midi mid midi kar;
audio/mpeg mpga mpega mp2 mp3 m4a;
audio/ogg oga ogg spx;
audio/x-realaudio ra;
audio/webm weba;
video/3gpp 3gpp 3gp;
video/mp4 mp4;
video/mpeg mpeg mpg mpe;
video/ogg ogv;
video/quicktime mov;
video/webm webm;
video/x-flv flv;
video/x-mng mng;
video/x-ms-asf asx asf;
video/x-ms-wmv wmv;
video/x-msvideo avi;
}
nginx_conf.in 0000664 0000000 0000000 00000001720 14241130220 0032443 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/templates worker_processes {{ param_html5as['nb_workers'] }};
pid {{ param_html5as['path_pid'] }};
error_log {{ param_html5as['path_error_log'] }};
daemon off;
events {
worker_connections 1024;
accept_mutex off;
}
http {
include {{ param_html5as['path_mime_types'] }};
default_type application/octet-stream;
types_hash_bucket_size 64;
access_log {{ param_html5as['path_access_log'] }} combined;
index index.html;
server {
listen [{{ param_html5as['ip'] }}]:{{ param_html5as['port'] }};
server_name _;
keepalive_timeout 5;
client_body_temp_path {{ param_tempdir['client_body_temp_path'] }};
proxy_temp_path {{ param_tempdir['proxy_temp_path'] }};
fastcgi_temp_path {{ param_tempdir['fastcgi_temp_path'] }};
uwsgi_temp_path {{ param_tempdir['uwsgi_temp_path'] }};
scgi_temp_path {{ param_tempdir['scgi_temp_path'] }};
# path for static files
root {{ param_html5as['docroot'] }};
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/test/ 0000775 0000000 0000000 00000000000 14241130220 0027023 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/test/README.md 0000664 0000000 0000000 00000000050 14241130220 0030275 0 ustar 00root root 0000000 0000000 Tests for html5as-base software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/test/setup.py 0000664 0000000 0000000 00000003663 14241130220 0030545 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.html5asbase'
with open("README.md") as f:
long_description = f.read()
setup(
name=name,
version=version,
description="Test for SlapOS' HTML5AS Base",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'requests',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as-base/test/test.py 0000664 0000000 0000000 00000005172 14241130220 0030361 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2021 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class HTML5ASBaseTestCase(SlapOSInstanceTestCase):
"""
Common class for testing html5as base.
  It inherits from SlapOSInstanceTestCase, which:
  * Installs the software release.
  * Checks it compiles without issue.
  * Deploys the instance.
  * Checks the deployment works and promises pass.
  To test the deployment, a separate test class needs to be set up
  for each variation of parameters given to the instance.
"""
def checkUrlAndGetResponse(self, url):
"""
    Helper method to check a URL and return the response.
"""
response = requests.get(url)
self.assertEqual(requests.codes['OK'], response.status_code)
return response
class TestEmptyDeploy(HTML5ASBaseTestCase):
"""
  This class tests the instance with no parameters.
  """
  def test_deploy_with_no_parameter(self):
"""
Get the connection URL and check it is accessible
"""
url = self.requestDefaultInstance().getConnectionParameterDict()['server_url']
response = self.checkUrlAndGetResponse(url)
result = response.text
self.assertEqual("Hello World!\n", result) slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/ 0000775 0000000 0000000 00000000000 14241130220 0025134 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/README.md 0000664 0000000 0000000 00000000746 14241130220 0026422 0 ustar 00root root 0000000 0000000 # Html5 Application Server #
## Presentation ##
* Fast hosting software for static websites (HTML5)
* Uses the Nginx server
## Parameter ##
download_url (string): required
Details:
* Only tarballs (tar) are supported
* gzip compression is supported (optional)
* The tarball must contain an index.html at its root
## How it works ##
Each time you (re)start your instance or update its parameters, html5as removes the previous website, then downloads the tarball and extracts it into the docroot directory.
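## Example request ##
A minimal sketch, assuming a configured `slapos` command-line client; the software release URL is a placeholder and the CLI syntax may vary between slapos.core versions. `title` and `port` are the instance parameters whose defaults are defined in instance.cfg.in:

```bash
# Request an instance with a custom page title and nginx port;
# download_url may point to a tarball to serve (placeholder URL below).
slapos request my-html5as https://example.org/html5as/software.cfg \
  --parameters title=Test1 port=8087 download_url=https://example.org/site.tar.gz
```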
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/buildout.hash.cfg 0000664 0000000 0000000 00000003212 14241130220 0030364 0 ustar 00root root 0000000 0000000 # To learn more about how to generate this file read
# ../../README.update-hash.rst
# THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template-cfg]
filename = instance.cfg.in
md5sum = ef264514b64a4b2c77c9965c587c6d34
[instance_html5as]
_update_hash_filename_ = instance_html5as.cfg.in
md5sum = a48e0026e2b949cc334efcb21478a6ed
[template_nginx_conf]
_update_hash_filename_ = templates/nginx_conf.in
md5sum = 61dc4c82bf48563228ce4dea6c5c6319
[template_launcher]
_update_hash_filename_ = templates/launcher.in
md5sum = 6cb0d64905ae7fc67277c1bf76b86875
[template_mime_types]
_update_hash_filename_ = templates/mime_types.in
md5sum = 4ef94a7b458d885cd79ba0b930a5727e
[template_index_html]
_update_hash_filename_ = templates/index.html.in
md5sum = d57cb01df5941e139b02a2f7bdabcdc8
[template_graceful]
_update_hash_filename_ = templates/graceful.in
md5sum = 1c0ee16966e1fcdb3fd11c09f12ee2b2
[template_instance_replicate]
_update_hash_filename_ = instance_replicate.cfg.in
md5sum = 7ff7e11d05145115f53564ec1af205ef
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/instance.cfg.in 0000664 0000000 0000000 00000004154 14241130220 0030032 0 ustar 00root root 0000000 0000000 [buildout]
parts =
switch-softwaretype
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
[profile-common]
nginx_location = {{ nginx_location }}
dash_location = {{ dash_location }}
template_nginx_conf = {{ template_nginx_conf_target }}
template_mime_types = {{ template_mime_types_target }}
template_launcher = {{ template_launcher_target }}
template_index_html = {{ template_index_html_target }}
template_graceful = {{ template_graceful_target }}
template_monitor = {{ template_monitor }}
[instance-html5as]
recipe = slapos.recipe.template:jinja2
url = {{ template_instance_html5as_target }}
output = ${buildout:directory}/${:filename}
filename = instance-html5as.cfg
context =
section buildout buildout
section parameter_list profile-common
# partition_ipv6 is the random ipv6 allocated to the local partition
key partition_ipv6 slap-configuration:ipv6-random
# slapparameter_dict: dictionary of all parameters
key slapparameter_dict slap-configuration:configuration
jsonkey default_parameter_dict :default-parameters
default-parameters =
{
"title": "",
"download_url": null,
"port": 8081,
"monitor-httpd-port": 8197
}
[instance-replicate]
recipe = slapos.recipe.template:jinja2
extensions = jinja2.ext.do
url = {{ template_instance_replicate }}
output = ${buildout:directory}/${:filename}
filename = instance-replicate-html5as.cfg
context =
section buildout buildout
section parameter_list profile-common
key slapparameter_dict slap-configuration:configuration
jsonkey default_parameter_dict :default-parameters
default-parameters =
{
"download_url": null,
"replicate-quantity": 1
}
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
RootSoftwareInstance = ${:default}
default = instance-html5as:output
replicate = instance-replicate:output
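# Illustrative note: an instance requested with software-type "replicate"
# is deployed from instance-replicate-html5as.cfg; any other request
# (RootSoftwareInstance or default) uses instance-html5as.cfg.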
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/instance_html5as.cfg.in 0000664 0000000 0000000 00000017143 14241130220 0031471 0 ustar 00root root 0000000 0000000 #############################
#
# Deploy html5as instance
#
#############################
# parameter_dict: a dictionary with the default parameters from instance.cfg.in
# replaces the values with the parameters of the instance request if there are any
{% set parameter_dict = dict(default_parameter_dict, **slapparameter_dict) %}
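# Illustrative example: if the request supplies {"title": "My site"},
# parameter_dict becomes {"title": "My site", "download_url": None,
# "port": 8081, "monitor-httpd-port": 8197}; request values override
# the defaults and missing keys keep their default value.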
[buildout]
parts =
nginx_conf
mime_types
launcher
nginx-graceful
port-listening-promise
logrotate-entry-nginx
publish-connection-information
# Define egg directories to be the one from Software Release
# (/opt/slapgrid/...)
# Always the same.
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
# Instance profile extends monitoring stack
extends = {{ parameter_list['template_monitor'] }}
# partition tree
# /
# |- etc/
# | |- nginx.conf
# | |- run/
# | |- html5as (binary)
# |- var/
# | |- run/
# | | |- nginx.pid
# | |- log/
# | | |- nginx.log
# | | |- nginx.access.log
# |- srv/
# | |- html5as/ (doc root)
# | | |- index.html
# | |- backup/
# Create all needed directories, depending on your needs
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
srv = ${:home}/srv
[basedirectory]
recipe = slapos.cookbook:mkdirectory
# Executables put here will be started but not monitored (for startup scripts)
script = ${directory:etc}/run
# Executables put here will be started and monitored (for daemons)
service = ${directory:etc}/service
log = ${directory:var}/log
run = ${directory:var}/run
backup = ${directory:srv}/backup
[tempdirectory]
recipe = slapos.cookbook:mkdirectory
tmp = ${directory:home}/tmp
client_body_temp_path = ${:tmp}/client_body_temp_path
proxy_temp_path = ${:tmp}/proxy_temp_path
fastcgi_temp_path = ${:tmp}/fastcgi_temp_path
uwsgi_temp_path = ${:tmp}/uwsgi_temp_path
scgi_temp_path = ${:tmp}/scgi_temp_path
# List of options for html5as configuration
# It will run a simple nginx serving the content of srv/html5as
[html5as]
# Options
nb_workers = 2
# Network
ip = {{ partition_ipv6 }}
port = {{ parameter_dict['port'] }}
access_url = http://[${:ip}]:${:port}
# Paths
# Log
path_pid = ${basedirectory:run}/nginx.pid
path_log = ${basedirectory:log}/nginx.log
path_access_log = ${basedirectory:log}/nginx.access.log
path_error_log = ${basedirectory:log}/nginx.error.log
path_tmp = ${tempdirectory:tmp}
# Docroot
docroot = ${downloader:location}
default_index = ${:docroot}/index.html
# Config files
path_nginx_conf = ${directory:etc}/nginx.conf
path_mime_types = ${directory:etc}/mime_types
# Binaries
path_shell = {{ parameter_list['dash_location'] }}/bin/dash
# Executables
bin_launcher = ${basedirectory:service}/launcher
# Utils
path_nginx = {{ parameter_list['nginx_location'] }}/sbin/nginx
# Render nginx conf
[nginx_conf]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template_nginx_conf'] }}
output = ${html5as:path_nginx_conf}
context =
section param_html5as html5as
section param_tempdir tempdirectory
# Render necessary mime types file for nginx
[mime_types]
recipe = slapos.recipe.template
url = {{ parameter_list['template_mime_types'] }}
output = ${html5as:path_mime_types}
# Render the script launching nginx
[launcher]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template_launcher'] }}
output = ${html5as:bin_launcher}
context =
section param_html5as html5as
# Command to put content in the docroot
[downloader]
recipe = slapos.recipe.build
# Path where the recipe stores any produced file;
# it will be automatically removed at the beginning of "install".
location = ${directory:srv}/html5as
# All the keys in this section will be available as a dict called "self.options"
# We add "or ''", otherwise jinja2 would render a 'None' string
url = {{ parameter_dict['download_url'] or '' }}
default_index_html = ${default_index_html:output}
# If a tarball is passed via the download_url parameter,
# its content will be served by the instance.
# If the parameter is not provided, it falls back to the default template.
install =
import os, shutil
buildout_offline = self.buildout['buildout']['offline']
try:
# Allow calling self.download(), which can only be used in "online" mode
self.buildout['buildout']['offline'] = 'false'
if self.options['url']:
# Use helper functions provided by slapos.recipe.build
# Download a file from a URL to a temporary path
file = self.download(self.options['url'])
# Create a directory and extract the files compressed inside it
extract_dir = self.extract(file)
# Return the right directory path
workdir = guessworkdir(extract_dir)
# Recursively copy directory
self.copyTree(workdir, location)
else:
# Create directory and copy the default template inside
os.makedirs(location)
shutil.copy(self.options['default_index_html'], location)
finally:
# reset the parameter
self.buildout['buildout']['offline'] = buildout_offline
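# Descriptive note: "location", "guessworkdir", "self.download",
# "self.extract" and "self.copyTree" used above are provided by
# slapos.recipe.build in the context of the "install" script; the
# offline flag is toggled so the download is allowed at instantiation.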
[default_index_html]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template_index_html'] }}
output = ${directory:srv}/index.html
title = {{ parameter_dict['title'] }}
context =
key title :title
### Nginx Graceful
[nginx-graceful]
recipe = slapos.recipe.template:jinja2
url = {{ parameter_list['template_graceful'] }}
output = ${basedirectory:script}/nginx-graceful
context =
section param_html5as html5as
# Port Listening checking promise
[port-listening-promise]
<= monitor-promise-base
promise = check_socket_listening
name = nginx-port-listening.py
config-host = ${html5as:ip}
config-port = ${html5as:port}
# Use a port different from the default one in order to be able to
# use it in a SlapOS webrunner or a Theia SlapOS Runner
[monitor-instance-parameter]
monitor-httpd-port = {{ parameter_dict['monitor-httpd-port'] }}
# The monitor stack also provides the logrotate stack. We only need to extend
# the logrotate-entry-base defined in instance-logrotate-base.cfg.in.
# More parameters can be added following the logrotate-entry-base section
[logrotate-entry-nginx]
<= logrotate-entry-base
name = nginx
log = ${html5as:path_access_log} ${html5as:path_error_log}
post = kill -USR1 $(cat ${html5as:path_pid})
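# (USR1 tells nginx to re-open its log files after rotation.)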
# Publish nginx address
[publish-connection-information]
recipe = slapos.cookbook:publish
# By extending monitor-publish, all the sections deploying monitoring will
# be deployed. The parameters needed to access monitoring will be published.
<= monitor-publish
server_url = ${html5as:access_url}
title = Title {{ parameter_dict['title'] }}!
# Add dependency to the promise so that frontend sections are processed
# and there is no need to declare the new part in buildout:parts
server-cdn-url = ${html5as-frontend-promise:url}
# Request a CDN entry to master
[html5as-frontend]
# Extend slap-connection to get the credentials for the request
<= slap-connection
# Recipe used to make requests
recipe = slapos.cookbook:requestoptional
name = HTML5AS frontend
# Specify the software url of the frontend software release
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
# It is not a dedicated instance but an instance allocated on a shared instance
slave = true
config-url = ${html5as:access_url}
config-https-only = true
# The parameters expected to be received from the request are listed here.
return = domain secure_access
# Add a promise to make sure the cdn is properly configured
[html5as-frontend-promise]
<= monitor-promise-base
promise = check_url_available
name = html5as-http-frontend.py
url = ${html5as-frontend:connection-secure_access}
config-url = ${:url}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/instance_replicate.cfg.in 0000664 0000000 0000000 00000004374 14241130220 0032066 0 ustar 00root root 0000000 0000000 {%- set parameter_dict = dict(default_parameter_dict, **slapparameter_dict) %}
{%- set replicate_quantity = parameter_dict['replicate-quantity'] | int %}
# Set default title and port for each replicate based on requested quantity
{%- for i in range(1, replicate_quantity + 1) %}
{%- do parameter_dict.setdefault("title-%d" % i, "") %}
{%- do parameter_dict.setdefault("port-%d" % i, 8081 + i) %}
{%- do parameter_dict.setdefault("monitor-httpd-port-%d" % i, 8197 + i) %}
{%- endfor %}
# Standard buildout section
[buildout]
parts =
publish-connection-information
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
################################
# Sections to Request instances
################################
# Macro section sharing request parameters
[instance-request-base]
<= slap-connection
recipe = slapos.cookbook:request
# It is the same software as the current one
software-url = ${slap-connection:software-release-url}
# We want the default behaviour
software-type = default
# Which parameters need to be retrieved
return = server_url server-cdn-url monitor-setup-url
# Provided parameters
# We add "or ''", otherwise jinja2 would render a 'None' string
config-download_url = {{ parameter_dict['download_url'] or '' }}
# Create request section in a loop.
{% for i in range(1, replicate_quantity + 1) %}
# Request a normal html5as instance
[instance-{{ i }}]
<= instance-request-base
# Name of the instance
name = instance-html5as-{{ i }}
config-port = {{ parameter_dict["port-%s" % i] }}
config-title = {{ parameter_dict["title-%s" % i] }}
config-monitor-httpd-port = {{ parameter_dict["monitor-httpd-port-%s" % i] }}
{% if "sla-%s-computer-guid" % i in parameter_dict -%}
sla-computer_guid = {{ parameter_dict["sla-%s-computer-guid" % i] }}
{% endif -%}
{% endfor %}
# Publish information to connect to the requested instances
[publish-connection-information]
recipe = slapos.cookbook:publish
{% for i in range(1, replicate_quantity + 1) %}
instance-{{ i }}-server_url = ${instance-{{ i }}:connection-server_url}
instance-{{ i }}-server-cdn-url = ${instance-{{ i }}:connection-server-cdn-url}
instance-{{ i }}-server-monitor-setup-url = ${instance-{{ i }}:connection-monitor-setup-url}
{% endfor %}
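# Illustrative example: requesting software-type "replicate" with
# parameters {"replicate-quantity": 2, "title-1": "A", "title-2": "B"}
# deploys instance-html5as-1 and instance-html5as-2 and publishes their
# connection parameters under the "instance-<i>-" prefix.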
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/software.cfg 0000664 0000000 0000000 00000005004 14241130220 0027446 0 ustar 00root root 0000000 0000000 [buildout]
extends =
# buildout.hash.cfg is used for automated hash calculation of managed
# instance files by calling update-hash
buildout.hash.cfg
# "slapos" stack describes basic things needed for 99.9% of SlapOS Software
# Releases
../../stack/slapos.cfg
# Extend monitoring stack to provide necessary tools for monitoring
../../stack/monitor/buildout.cfg
# Extend here component profiles, like openssl, apache, mariadb, curl...
# Or/and extend a stack (lamp, tomcat) that does most of the work for you
# In this example we extend needed components for html5as.
../../component/nginx/buildout.cfg
../../component/dash/buildout.cfg
parts =
# Call installation of slapos.cookbook egg defined in stack/slapos.cfg (needed
# in 99.9% of SlapOS Software Releases)
slapos-cookbook
# Call creation of instance.cfg file that will be called for deployment of
# instance
template-cfg
# Download instance.cfg.in (the buildout profile used to deploy the instance),
# replacing all {{ foo_bar }} parameters with real values.
# The recipe, template and mode are fetched from the jinja2-template recipe
[template-cfg]
recipe = slapos.recipe.template:jinja2
output = ${buildout:directory}/template.cfg
url = ${:_profile_base_location_}/${:filename}
context =
section buildout buildout
key nginx_location nginx:location
key dash_location dash:location
key template_nginx_conf_target template_nginx_conf:target
key template_mime_types_target template_mime_types:target
key template_launcher_target template_launcher:target
key template_instance_html5as_target instance_html5as:target
key template_index_html_target template_index_html:target
key template_graceful_target template_graceful:target
key template_instance_replicate template_instance_replicate:target
# Monitor stack also provides a template for the instance
key template_monitor monitor2-template:output
# Have one shared section to define the default behaviour to download
# templates. Sections inheriting from this one won't need to redefine
# shared parameters
[download-base]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
# Download instance_html5as.cfg.in
[instance_html5as]
# This section inherit from download-base
<= download-base
# Filename and md5sum is defined in buildout.hash.cfg
[template_nginx_conf]
<= download-base
[template_launcher]
<= download-base
[template_mime_types]
<= download-base
[template_index_html]
<= download-base
[template_graceful]
<= download-base
[template_instance_replicate]
<= download-base
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/templates/ 0000775 0000000 0000000 00000000000 14241130220 0027132 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/templates/graceful.in 0000664 0000000 0000000 00000000316 14241130220 0031252 0 ustar 00root root 0000000 0000000 #! {{ param_html5as['path_shell'] }}
# BEWARE: This file is operated by slapos node
# BEWARE: It will be overwritten automatically
# Run graceful
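# (SIGHUP makes nginx re-read its configuration and replace worker
# processes gracefully, without dropping active connections.)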
exec kill -s SIGHUP $(cat {{ param_html5as['path_pid'] }})
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/templates/index.html.in 0000664 0000000 0000000 00000000103 14241130220 0031526 0 ustar 00root root 0000000 0000000 {% if title %}
<h1>{{ title }}</h1>
{% endif %}
<p>Hello World</p>
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/templates/launcher.in 0000664 0000000 0000000 00000000341 14241130220 0031261 0 ustar 00root root 0000000 0000000 #! {{ param_html5as['path_shell'] }}
# BEWARE: This file is operated by slapos node
# BEWARE: It will be overwritten automatically
# Run nginx
exec {{ param_html5as['path_nginx'] }} -c {{ param_html5as['path_nginx_conf'] }}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/templates/mime_types.in 0000664 0000000 0000000 00000004013 14241130220 0031633 0 ustar 00root root 0000000 0000000 types {
text/html html htm shtml;
text/css css;
text/xml xml rss;
image/gif gif;
image/jpeg jpeg jpg;
application/x-javascript js;
application/atom+xml atom;
text/mathml mml;
text/plain txt;
text/vnd.sun.j2me.app-descriptor jad;
text/vnd.wap.wml wml;
text/x-component htc;
image/png png;
image/tiff tif tiff;
image/vnd.wap.wbmp wbmp;
image/x-icon ico;
image/x-jng jng;
image/x-ms-bmp bmp;
image/svg+xml svg svgz;
application/java-archive jar war ear;
application/mac-binhex40 hqx;
application/msword doc;
application/pdf pdf;
application/postscript ps eps ai;
application/rtf rtf;
application/vnd.ms-excel xls;
application/vnd.ms-powerpoint ppt;
application/vnd.wap.wmlc wmlc;
application/vnd.google-earth.kml+xml kml;
application/vnd.google-earth.kmz kmz;
application/x-7z-compressed 7z;
application/x-cocoa cco;
application/x-java-archive-diff jardiff;
application/x-java-jnlp-file jnlp;
application/x-makeself run;
application/x-perl pl pm;
application/x-pilot prc pdb;
application/x-rar-compressed rar;
application/x-redhat-package-manager rpm;
application/x-sea sea;
application/x-shockwave-flash swf;
application/x-stuffit sit;
application/x-tcl tcl tk;
application/x-x509-ca-cert der pem crt;
application/x-xpinstall xpi;
application/xhtml+xml xhtml;
application/zip zip;
application/octet-stream bin exe dll;
application/octet-stream deb;
application/octet-stream dmg;
application/octet-stream eot;
application/octet-stream iso img;
application/octet-stream msi msp msm;
application/ogg ogx;
audio/midi mid midi kar;
audio/mpeg mpga mpega mp2 mp3 m4a;
audio/ogg oga ogg spx;
audio/x-realaudio ra;
audio/webm weba;
video/3gpp 3gpp 3gp;
video/mp4 mp4;
video/mpeg mpeg mpg mpe;
video/ogg ogv;
video/quicktime mov;
video/webm webm;
video/x-flv flv;
video/x-mng mng;
video/x-ms-asf asx asf;
video/x-ms-wmv wmv;
video/x-msvideo avi;
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/templates/nginx_conf.in 0000664 0000000 0000000 00000001720 14241130220 0031612 0 ustar 00root root 0000000 0000000 worker_processes {{ param_html5as['nb_workers'] }};
pid {{ param_html5as['path_pid'] }};
error_log {{ param_html5as['path_error_log'] }};
daemon off;
events {
worker_connections 1024;
accept_mutex off;
}
http {
include {{ param_html5as['path_mime_types'] }};
default_type application/octet-stream;
types_hash_bucket_size 64;
access_log {{ param_html5as['path_access_log'] }} combined;
index index.html;
server {
listen [{{ param_html5as['ip'] }}]:{{ param_html5as['port'] }};
server_name _;
keepalive_timeout 5;
client_body_temp_path {{ param_tempdir['client_body_temp_path'] }};
proxy_temp_path {{ param_tempdir['proxy_temp_path'] }};
fastcgi_temp_path {{ param_tempdir['fastcgi_temp_path'] }};
uwsgi_temp_path {{ param_tempdir['uwsgi_temp_path'] }};
scgi_temp_path {{ param_tempdir['scgi_temp_path'] }};
# path for static files
root {{ param_html5as['docroot'] }};
}
}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/test/ 0000775 0000000 0000000 00000000000 14241130220 0026113 5 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/test/README.md 0000664 0000000 0000000 00000000043 14241130220 0027367 0 ustar 00root root 0000000 0000000 Tests for html5as software release
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/test/setup.py 0000664 0000000 0000000 00000003652 14241130220 0027633 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.html5as'
with open("README.md") as f:
long_description = f.read()
setup(
name=name,
version=version,
description="Test for SlapOS' HTML5AS",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'requests',
],
zip_safe=True,
test_suite='test',
)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/html5as/test/test.py 0000664 0000000 0000000 00000014344 14241130220 0027452 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (c) 2021 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import requests
try:
  from urllib.parse import urlparse  # Python 3
except ImportError:  # Python 2 fallback
  from urlparse import urlparse
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class HTML5ASTestCase(SlapOSInstanceTestCase):
"""
Common class for testing html5as.
  It inherits from SlapOSInstanceTestCase, which:
  * Installs the software release.
  * Checks it compiles without issue.
  * Deploys the instance.
  * Checks the deployment works and promises pass.
  To test the deployment, a separate test class needs to be set up
  for each variation of parameters given to the instance.
"""
def checkUrlAndGetResponse(self, url):
"""
    Helper method to check a URL and return the response.
"""
response = requests.get(url)
self.assertEqual(requests.codes['OK'], response.status_code)
return response
class TestEmptyDeploy(HTML5ASTestCase):
"""
  This class tests the instance with no parameters.
  """
  def test_deploy_with_no_parameter(self):
url = self.requestDefaultInstance().getConnectionParameterDict()['server_url']
response = self.checkUrlAndGetResponse(url)
result = response.text
self.assertFalse("" in result)
self.assertTrue(" Hello World
" in result)
class TestDeployWithTitle(HTML5ASTestCase):
"""
  This class tests an instance with the parameter "title".
"""
@classmethod
def getInstanceParameterDict(cls):
return {
'title': 'Test1',
}
def test_deploy_with_title_parameter(self):
connection_parameter_dict = self.computer_partition.getConnectionParameterDict()
self.assertEqual(connection_parameter_dict["title"], "Title Test1!")
url = connection_parameter_dict['server_url']
response = self.checkUrlAndGetResponse(url)
result = response.text
self.assertTrue("Test1 " in result)
self.assertTrue("Hello World
" in result)
class TestGracefulWithPortChange(HTML5ASTestCase):
"""
  This class tests the instance with the parameter "port".
"""
instance_parameter_dict = {
'port': 8087
}
@classmethod
def getInstanceParameterDict(cls):
return cls.instance_parameter_dict
def test_change_port_parameter(self):
"""
    This test checks that a port change is applied via graceful restart.
"""
# Check initial connection parameter match expected port
url = self.computer_partition.getConnectionParameterDict()['server_url']
self.assertEqual(urlparse(url).port, 8087)
# Check the port is listening, even though this duplicates the promise:
# "port-listening-promise"
self.checkUrlAndGetResponse(url)
# Update port parameter
self.instance_parameter_dict.update({
'port': 8086,
})
# Request instance with the new port parameter
self.requestDefaultInstance()
# Reprocess the instance to apply new port and run promises
self.slap.waitForInstance(self.instance_max_retry)
# Re-request the instance to get the updated connection parameters
url = self.requestDefaultInstance().getConnectionParameterDict()['server_url']
# Make sure the new port is the one being used
self.assertEqual(urlparse(url).port, 8086)
# Check the port is listening, even though this duplicates the promise:
# "port-listening-promise"
self.checkUrlAndGetResponse(url)
class TestReplicateHTML5AS(HTML5ASTestCase):
"""
  This class tests the replicate software type.
"""
instance_parameter_dict = {
"port-1": 8088,
"title-1": "Title 1",
}
@classmethod
def getInstanceSoftwareType(cls):
return 'replicate'
@classmethod
def getInstanceParameterDict(cls):
return cls.instance_parameter_dict
def test_replicate_instance(self):
# Check First instance is deployed with proper parameters
connection_parameter_dict = self.computer_partition.getConnectionParameterDict()
url = connection_parameter_dict['instance-1-server_url']
self.assertEqual(urlparse(url).port, 8088)
response = self.checkUrlAndGetResponse(url)
result = response.text
self.assertTrue("Title 1 " in result)
# Check only one instance is deployed by default
self.assertTrue("instance-2-server_url" not in connection_parameter_dict)
# Update replicate quantity parameter
self.instance_parameter_dict.update({
'replicate-quantity': 2,
'port-2': 8089,
'sla-2-computer_guid': self.slap._computer_id,
"title-2": "Title 314",
})
# Request the instance with one more replicate
self.requestDefaultInstance()
self.slap.waitForInstance(self.instance_max_retry)
# Check the second replicate
connection_parameter_dict = self.requestDefaultInstance().getConnectionParameterDict()
url = connection_parameter_dict['instance-2-server_url']
self.assertEqual(urlparse(url).port, 8089)
response = self.checkUrlAndGetResponse(url)
result = response.text
self.assertTrue("Title 314 " in result)
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/htmlvalidatorserver/ 0000775 0000000 0000000 00000000000 14241130220 0027660 5 ustar 00root root 0000000 0000000 buildout.hash.cfg 0000664 0000000 0000000 00000002112 14241130220 0033027 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/htmlvalidatorserver # THIS IS NOT A BUILDOUT FILE, despite purposedly using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginings, copied verbatim
# - lines containing an "=" sign which must fit in the following categorie.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template-tomcat-configuration]
filename = server.xml.in
md5sum = fd2562b9ac0c52a2e9cc02e3ef3cef78
[template-tomcat-service]
filename = template-tomcat-service.sh.in
md5sum = 87781e6bcb523bb8434888d5f984f36c
[template-validator]
filename = instance-validator.cfg.in
md5sum = 3733c484371115831721341f9b65dc0f
[template]
filename = instance.cfg.in
md5sum = c86d4c6b5cc67139810399b1032c3410
instance-validator.cfg.in 0000664 0000000 0000000 00000004301 14241130220 0034454 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/htmlvalidatorserver ###############################
# Instantiate vnu (the Nu Html Checker)
###############################
[basedirectory]
recipe = slapos.cookbook:mkdirectory
etc = $${buildout:directory}/etc
bin = $${buildout:directory}/bin
srv = $${buildout:directory}/srv
var = $${buildout:directory}/var
run = $${:var}/run
log = $${:var}/log
# scripts = $${:etc}/run
services = $${:etc}/service
# tomcat directories
catalina_base = $${:var}/vnu
catalina_logs = $${:catalina_base}/logs
catalina_temp = $${:catalina_base}/temp
catalina_webapps = $${:catalina_base}/webapps
catalina_work = $${:catalina_base}/work
catalina_conf = $${:catalina_base}/conf
#################################
# Tomcat service
#################################
[keystore]
recipe = plone.recipe.command
command =
${java-re-8-output:keytool} \
-genkeypair \
-alias "tomcat" \
-keyalg RSA \
-keypass "$${:pass}" \
-dname "CN=Web Server,OU=Unit,O=Organization,L=City,S=State,C=Country" \
-keystore "$${:file}" \
-storepass "$${:pass}"
file = $${basedirectory:catalina_base}/.keystore
pass = insecure
[tomcat-service]
recipe = slapos.recipe.template
url = ${template-tomcat-service:output}
output = $${basedirectory:services}/tomcat
virtual-depends =
$${tomcat-configuration:ip}
[tomcat-configuration]
recipe = slapos.recipe.template
url = ${template-tomcat-configuration:output}
output = $${basedirectory:catalina_conf}/server.xml
ip = {{ partition_ipv6 }}
port = 8899
scheme = https
[tomcat-listen-promise]
<= monitor-promise-base
promise = check_socket_listening
name = tomcat_listen.py
config-host = $${tomcat-configuration:ip}
config-port = $${tomcat-configuration:port}
#################################
# Slapos publish
#################################
[publish-url]
recipe = slapos.cookbook:publish
<= monitor-publish
vnu-url = $${tomcat-configuration:scheme}://[$${tomcat-configuration:ip}]:$${tomcat-configuration:port}/
[monitor-instance-parameter]
monitor-httpd-port = 8333
# Add parts generated by template
[buildout]
extends =
${monitor-template:output}
parts =
publish-url
tomcat-service
tomcat-listen-promise
monitor-base
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
instance.cfg.in 0000664 0000000 0000000 00000002027 14241130220 0032474 0 ustar 00root root 0000000 0000000 slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/htmlvalidatorserver [buildout]
parts =
switch-softwaretype
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[dynamic-template-validator]
recipe = slapos.recipe.template:jinja2
url = ${template-validator:output}
output = $${buildout:parts-directory}/$${:_buildout_section_name_}/$${:filename}
filename = instance-validator.cfg
context =
# partition_ipv6 is the random ipv6 allocated to the local partition
key partition_ipv6 slap-configuration:ipv6-random
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
RootSoftwareInstance = $${:validator}
default = $${:validator}
validator = dynamic-template-validator:output
[slap-configuration]
# Fetches parameters defined in SlapOS Master for this instance.
# Always the same.
recipe = slapos.cookbook:slapconfiguration.serialised
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
slapos-d81bc08ef5fa49003cf861d28f4a5ee73ff43581-software/software/htmlvalidatorserver/server.xml.in 0000664 0000000 0000000 00000002003 14241130220 0032310 0 ustar 00root root 0000000 0000000