Commit 9c96044c authored by Alain Takoudjou

Update Release Candidate

parents fa3ac825 45ad5918
...@@ -171,15 +171,3 @@ script = ...@@ -171,15 +171,3 @@ script =
url, md5sum = self.options[guessPlatform()].split() url, md5sum = self.options[guessPlatform()].split()
extract_dir = self.extract(self.download(url, md5sum)) extract_dir = self.extract(self.download(url, md5sum))
shutil.copy(extract_dir + '/geckodriver', %(location)r) shutil.copy(extract_dir + '/geckodriver', %(location)r)
### BBB
# testnodes are still using erp5.util 0.4.50 and are not looking for firefox in PATH
# To be removed once testnodes are updated to
# https://lab.node.vifib.com/nexedi/slapos/raw/1.0.73/software/erp5testnode/software.cfg
# or later
[firefox-wrapper]
location = ${firefox:location}/firefox-slapos
[geckodriver]
location = ${firefox:location}/geckodriver
...@@ -17,5 +17,5 @@ rpath = ${:library-dirs} ...@@ -17,5 +17,5 @@ rpath = ${:library-dirs}
[geolite2-country] [geolite2-country]
recipe = slapos.recipe.build:download-unpacked recipe = slapos.recipe.build:download-unpacked
url = http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.tar.gz#${:md5sum} url = http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.tar.gz#${:md5sum}
md5sum = e1389fb31cf83665418b0a83ba8b3147 md5sum = 4d4bf41db15f99b227a1009043963102
strip-top-level-dir = true strip-top-level-dir = true
{ {
"$schema": "http://json-schema.org/draft-04/schema#", "$schema": "http://json-schema.org/draft-07/schema#",
"description": "Parameters to instantiate ERP5", "description": "Parameters to instantiate ERP5",
"additionalProperties": false, "additionalProperties": false,
"definitions": {
"tcpv4port": {
"minimum": 0,
"maximum": 65535,
"type": "integer"
}
},
"properties": { "properties": {
"sla-dict": { "sla-dict": {
"description": "Where to request instances. Each key is a query string for criterions (e.g. \"computer_guid=foo\"), and each value is a list of partition references (note: Zope partitions reference must be prefixed with \"zope-\").", "description": "Where to request instances. Each key is a query string for criterions (e.g. \"computer_guid=foo\"), and each value is a list of partition references (note: Zope partitions reference must be prefixed with \"zope-\").",
...@@ -89,7 +96,7 @@ ...@@ -89,7 +96,7 @@
"software-type": { "software-type": {
"description": "Request a front-end slave instance of this software type.", "description": "Request a front-end slave instance of this software type.",
"default": "RootSoftwareInstance", "default": "RootSoftwareInstance",
"type": "object" "type": "string"
}, },
"virtualhostroot-http-port": { "virtualhostroot-http-port": {
"description": "Front-end slave http port. Port where http requests to frontend will be redirected.", "description": "Front-end slave http port. Port where http requests to frontend will be redirected.",
...@@ -183,14 +190,14 @@ ...@@ -183,14 +190,14 @@
"kumofs": { "kumofs": {
"description": "Persistent memcached service", "description": "Persistent memcached service",
"additionalProperties": { "additionalProperties": {
"$ref": "./instance-kumofs-schema.json#/properties" "$ref": "./instance-kumofs-schema.json"
}, },
"type": "object" "type": "object"
}, },
"memcached": { "memcached": {
"description": "Volatile memcached service", "description": "Volatile memcached service",
"additionalProperties": { "additionalProperties": {
"$ref": "./instance-kumofs-schema.json#/properties" "$ref": "./instance-kumofs-schema.json"
}, },
"type": "object" "type": "object"
}, },
...@@ -207,14 +214,14 @@ ...@@ -207,14 +214,14 @@
"smtp": { "smtp": {
"description": "Mail queuing and relay service", "description": "Mail queuing and relay service",
"additionalProperties": { "additionalProperties": {
"$ref": "./instance-smtp-schema.json#/properties" "$ref": "./instance-smtp-schema.json"
}, },
"type": "object" "type": "object"
}, },
"mariadb": { "mariadb": {
"description": "Relational database service", "description": "Relational database service",
"additionalProperties": { "additionalProperties": {
"$ref": "./instance-mariadb-schema.json#/properties" "$ref": "./instance-mariadb-schema.json"
}, },
"type": "object" "type": "object"
}, },
...@@ -261,25 +268,6 @@ ...@@ -261,25 +268,6 @@
"default": "/", "default": "/",
"type": "string" "type": "string"
}, },
"type": {
"description": "Storage type",
"enum": [
"zeo",
"neo"
],
"type": "string"
},
"server": {
"description": "Instantiate a server. If missing, 'storage-dict' must contain the necessary properties to mount the ZODB. For ZEO, the partition reference is 'zodb'. For NEO, they are 'neo-0', 'neo-1', ...",
"anyOf": [
{
"$ref": "./instance-zeo-schema.json"
},
{
"$ref": "../neoppod/instance-neo-input-schema.json"
}
]
},
"storage-dict": { "storage-dict": {
"description": "Storage configuration. For NEO, 'logfile' is automatically set (see https://lab.nexedi.com/nexedi/neoppod/blob/master/neo/client/component.xml for other settings).", "description": "Storage configuration. For NEO, 'logfile' is automatically set (see https://lab.nexedi.com/nexedi/neoppod/blob/master/neo/client/component.xml for other settings).",
"properties": { "properties": {
...@@ -293,8 +281,39 @@ ...@@ -293,8 +281,39 @@
"type": "string" "type": "string"
}, },
"type": "object" "type": "object"
} },
"type": true,
"server": true
}, },
"oneOf": [
{
"title": "zeo",
"properties": {
"type": {
"description": "Storage type",
"const": "zeo"
},
"server": {
"description": "Instantiate a server. If missing, 'storage-dict' must contain the necessary properties to mount the ZODB. The partition reference is 'zodb'.",
"$ref": "./instance-zeo-schema.json"
}
}
},
{
"title": "neo",
"properties": {
"type": {
"description": "Storage type",
"const": "neo"
},
"server": {
"description": "Instantiate a server. If missing, 'storage-dict' must contain the necessary properties to mount the ZODB. Partitions references are 'neo-0', 'neo-1', ...",
"$ref": "../neoppod/instance-neo-input-schema.json"
}
}
}
],
"additionalProperties": { "additionalProperties": {
"type": "string" "type": "string"
}, },
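For illustration, a minimal sketch (written as a Python literal) of a zodb entry that the "zeo" branch of the `oneOf` above would accept. Only keys visible in this schema fragment are used; the port value is made up and it is assumed that the referenced instance-zeo-schema.json takes a "tcpv4-port" property, as the similar schemas touched by this commit do.
# Hypothetical zodb entry matching the "zeo" branch above; values are illustrative.
zodb_entry = {
    "type": "zeo",                      # selects the "zeo" branch of the oneOf
    "server": {"tcpv4-port": 2100},     # instantiate a ZEO server (port is arbitrary)
    "mount-point": "/",                 # default shown earlier in the schema
    "storage-dict": {},                 # extra client-side storage configuration
}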
...@@ -341,7 +360,7 @@ ...@@ -341,7 +360,7 @@
} }
}, },
"additionalProperties": { "additionalProperties": {
"$ref": "../caucase/instance-caucase-input-schema.json#/properties" "$ref": "../caucase/instance-caucase-input-schema.json"
}, },
"type": "object" "type": "object"
} }
......
{ {
"$schema": "http://json-schema.org/draft-04/schema#", "$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"required": [ "required": [
"tcpv4-port" "tcpv4-port"
], ],
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
"required": [ "required": [
"tcpv4-port" "tcpv4-port"
], ],
"type": "object",
"properties": { "properties": {
"tcpv4-port": { "tcpv4-port": {
"allOf": [ "allOf": [
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
"required": [ "required": [
"tcpv4-port" "tcpv4-port"
], ],
"type": "object",
"properties": { "properties": {
"tcpv4-port": { "tcpv4-port": {
"allOf": [ "allOf": [
......
# monitor test
This software release simply runs the test suite from `../monitor/test/setup.py`.
Nexedi staff can see the results of this test in the test suite
`SLAPOS-MONITOR-TEST` in the test result module.
# THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginnings, copied verbatim
# - lines containing an "=" sign which must fit in the following categories.
#   - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
#     Copied verbatim.
#   - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
#     by the re-generation script.
#     Re-generated.
#   - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[template]
filename = instance.cfg.in
md5sum = 887a866f115c1e75b695379990e1709b
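The format rules above can be restated as a small sketch; the helper below is hypothetical (it is not the actual re-generation script) and only mirrors the regexes listed in the header comment, with md5sum standing in for a supported hash type.
import re

# Hypothetical helper mirroring the rules listed in the header comment above;
# this is not the real re-generation script.
def classify_line(line):
    if re.match(r'^#', line) or re.match(r'^\[', line):
        return 'verbatim'        # comments and section beginnings are copied as-is
    if re.match(r'^\s*filename\s*=\s*\S+\s*$', line):
        return 'verbatim'        # filename = path, relative to this file
    if re.match(r'^\s*md5sum\s*=', line):
        return 're-generated'    # hash lines are recomputed by the script
    return 'verbatim'            # any other line is copied verbatim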
[buildout]
parts =
slapos-test-runner
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration
computer = $${slap-connection:computer-id}
partition = $${slap-connection:partition-id}
url = $${slap-connection:server-url}
key = $${slap-connection:key-file}
cert = $${slap-connection:cert-file}
[download-source]
recipe = slapos.recipe.build:gitclone
git-executable = ${git:location}/bin/git
[slapos]
<= download-source
repository = ${slapos-repository:location}
[create-directory]
recipe = slapos.cookbook:mkdirectory
bin = $${buildout:directory}/bin
working-dir = $${buildout:directory}/tmp/
[slapos-test-runner]
recipe = slapos.cookbook:wrapper
wrapper-path = $${create-directory:bin}/runTestSuite
command-line =
${buildout:bin-directory}/runTestSuite
--python_interpreter=${buildout:bin-directory}/${eggs:interpreter}
--source_code_path_list=$${slapos:location}/software/monitor/test
# XXX slapos.cookbook:wrapper does not allow extending the environment, so we add some default $PATH entries (not sure they are needed)
environment =
PATH=${buildout:bin-directory}:/usr/bin/:/bin/
LOCAL_IPV4=$${slap-configuration:ipv4-random}
GLOBAL_IPV6=$${slap-configuration:ipv6-random}
SLAPOS_TEST_WORKING_DIR=$${create-directory:working-dir}
[buildout]
extends =
../../../../component/git/buildout.cfg
../../../../stack/slapos.cfg
./buildout.hash.cfg
parts =
slapos-cookbook
eggs
template
[setup-develop-egg]
recipe = zc.recipe.egg:develop
[slapos.test.monitor-setup]
<= setup-develop-egg
egg = slapos.test.monitor
setup = ${slapos-repository:location}/software/monitor/test/
[erp5.util-setup]
<= setup-develop-egg
egg = erp5.util[testnode]
setup = ${erp5.util-repository:location}
[eggs]
recipe = zc.recipe.egg
eggs =
${slapos.test.monitor-setup:egg}
${erp5.util-setup:egg}
slapos.core
entry-points =
runTestSuite=erp5.util.testsuite:runTestSuite
scripts =
runTestSuite
slapos
interpreter=
python_for_test
[git-clone-repository]
recipe = slapos.recipe.build:gitclone
git-executable = ${git:location}/bin/git
forbid-download-cache = true
branch = master
[slapos-repository]
<= git-clone-repository
repository = https://lab.nexedi.com/nexedi/slapos.git
# XXX we need an unreleased (0.4.51) version of erp5.util runTestSuite;
# later we can stop fetching it from git and just use the egg
[erp5.util-repository]
<= git-clone-repository
repository = https://lab.nexedi.com/nexedi/erp5.git
revision = 69013fa0fb67501089c776ab5e75d7bbf2e0e3bc
[template]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/${:filename}
output = ${buildout:directory}/template.cfg
mode = 640
[versions]
# clear the versions of tested eggs, to make sure we install the developed ones
slapos.test.monitor =
erp5.util =
#erp5.util = 0.4.51
pyasn1 = 0.4.2
slapos.recipe.template = 4.3
pysftp = 0.2.9
Tests for monitor Software Release
##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
import glob
import os
version = '0.0.1.dev0'
name = 'slapos.test.monitor'
long_description = open("README.md").read()
setup(name=name,
version=version,
description="Test for SlapOS' monitor",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'supervisor',
'psutil',
],
zip_safe=True,
test_suite='test',
)
##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import shutil
import urlparse
import tempfile
import requests
import socket
import StringIO
import subprocess
import json
import psutil
import utils
# for development: debugging logs and install Ctrl+C handler
if os.environ.get('DEBUG'):
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
unittest.installHandler()
class InstanceTestCase(utils.SlapOSInstanceTestCase):
@classmethod
def getSoftwareURLList(cls):
return (os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'software.cfg')), )
class ServicesTestCase(InstanceTestCase):
@staticmethod
def generateHashFromFiles(file_list):
import hashlib
hasher = hashlib.md5()
for path in file_list:
with open(path, 'r') as afile:
buf = afile.read()
hasher.update("%s\n" % len(buf))
hasher.update(buf)
hash = hasher.hexdigest()
return hash
def test_hashes(self):
hash_files = [
'software_release/buildout.cfg',
]
expected_process_names = [
'monitor-httpd-{hash}-on-watch',
'crond-{hash}',
]
supervisor = self.getSupervisorRPCServer().supervisor
process_names = [process['name']
for process in supervisor.getAllProcessInfo()]
hash_files = [os.path.join(self.computer_partition_root_path, path)
for path in hash_files]
for name in expected_process_names:
h = ServicesTestCase.generateHashFromFiles(hash_files)
expected_process_name = name.format(hash=h)
self.assertIn(expected_process_name, process_names)
##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import unittest
import os
import socket
from contextlib import closing
import logging
import StringIO
import xmlrpclib
import supervisor.xmlrpc
from erp5.util.testnode.SlapOSControler import SlapOSControler
from erp5.util.testnode.ProcessManager import ProcessManager
# Utility functions
def findFreeTCPPort(ip=''):
"""Find a free TCP port to listen to.
"""
family = socket.AF_INET6 if ':' in ip else socket.AF_INET
with closing(socket.socket(family, socket.SOCK_STREAM)) as s:
s.bind((ip, 0))
return s.getsockname()[1]
# TODO:
# - allow requesting multiple instances ?
class SlapOSInstanceTestCase(unittest.TestCase):
"""Install one slapos instance.
This test case install software(s) and request one instance during `setUpClass`
and destroy the instance during `tearDownClass`.
Software Release URL, Instance Software Type and Instance Parameters can be defined
on the class.
All tests from the test class will run with the same instance.
The following class attributes are available:
* `computer_partition`: the computer partition instance, implementing
`slapos.slap.interface.slap.IComputerPartition`.
* `computer_partition_root_path`: the path of the instance root directory.
"""
# Methods to be defined by subclasses.
@classmethod
def getSoftwareURLList(cls):
"""Return URL of software releases to install.
To be defined by subclasses.
"""
raise NotImplementedError()
@classmethod
def getInstanceParameterDict(cls):
"""Return instance parameters
To be defined by subclasses if they need to request instance with specific
parameters.
"""
return {}
@classmethod
def getInstanceSoftwareType(cls):
"""Return software type for instance, default "default"
To be defined by subclasses if they need to request instance with specific
software type.
"""
return "default"
# Utility methods.
def getSupervisorRPCServer(self):
"""Returns a XML-RPC connection to the supervisor used by slapos node
Refer to http://supervisord.org/api.html for details of available methods.
"""
# xmlrpc over unix socket https://stackoverflow.com/a/11746051/7294664
return xmlrpclib.ServerProxy(
'http://slapos-supervisor',
transport=supervisor.xmlrpc.SupervisorTransport(
None,
None,
# XXX hardcoded socket path
serverurl="unix://{working_directory}/inst/supervisord.socket".format(
**self.config)))
# Unittest methods
@classmethod
def setUpClass(cls):
"""Setup the class, build software and request an instance.
If you have to override this method, do not forget to call this method on
parent class.
"""
try:
cls.setUpWorkingDirectory()
cls.setUpConfig()
cls.setUpSlapOSController()
cls.runSoftwareRelease()
# XXX instead of "runSoftwareRelease", it would be better to be closer to slapos usage:
# cls.supplySoftwares()
# cls.installSoftwares()
cls.runComputerPartition()
# XXX instead of "runComputerPartition", it would be better to be closer to slapos usage:
# cls.requestInstances()
# cls.createInstances()
# cls.requestInstances()
except Exception:
cls.stopSlapOSProcesses()
raise
@classmethod
def tearDownClass(cls):
"""Tear down class, stop the processes and destroy instance.
"""
cls.stopSlapOSProcesses()
# Implementation
@classmethod
def stopSlapOSProcesses(cls):
if hasattr(cls, '_process_manager'):
cls._process_manager.killPreviousRun()
@classmethod
def setUpWorkingDirectory(cls):
"""Initialise the directories"""
cls.working_directory = os.environ.get(
'SLAPOS_TEST_WORKING_DIR',
os.path.join(os.path.dirname(__file__), '.slapos'))
# To prevent the error "Cannot open an HTTP server: socket.error reported
# AF_UNIX path too long", this `working_directory` should not be too deep.
# Socket paths are limited to 108 chars on Linux:
# https://github.com/torvalds/linux/blob/3848ec5/net/unix/af_unix.c#L234-L238
# Supervisord socket name contains the pid number, which is why we add
# .xxxxxxx in this check.
if len(cls.working_directory + '/inst/supervisord.socket.xxxxxxx') > 108:
raise RuntimeError('working directory ( {} ) is too deep, try setting '
'SLAPOS_TEST_WORKING_DIR'.format(cls.working_directory))
if not os.path.exists(cls.working_directory):
os.mkdir(cls.working_directory)
@classmethod
def setUpConfig(cls):
"""Create slapos configuration"""
cls.config = {
"working_directory": cls.working_directory,
"slapos_directory": cls.working_directory,
"log_directory": cls.working_directory,
"computer_id": 'slapos.test', # XXX
'proxy_database': os.path.join(cls.working_directory, 'proxy.db'),
'partition_reference': cls.__name__,
# "proper" slapos command must be in $PATH
'slapos_binary': 'slapos',
}
# Some tests are expecting that local IP is not set to 127.0.0.1
ipv4_address = os.environ.get('LOCAL_IPV4', '127.0.1.1')
ipv6_address = os.environ['GLOBAL_IPV6']
cls.config['proxy_host'] = cls.config['ipv4_address'] = ipv4_address
cls.config['ipv6_address'] = ipv6_address
cls.config['proxy_port'] = findFreeTCPPort(ipv4_address)
cls.config['master_url'] = 'http://{proxy_host}:{proxy_port}'.format(
**cls.config)
@classmethod
def setUpSlapOSController(cls):
"""Create the a "slapos controller" and supply softwares from `getSoftwareURLList`.
This is equivalent to:
slapos proxy start
for SR in getSoftwareURLList; do
slapos supply $SR $COMP
done
"""
cls._process_manager = ProcessManager()
# XXX this code is copied from testnode code
cls.slapos_controler = SlapOSControler(
cls.working_directory,
cls.config
)
slapproxy_log = os.path.join(cls.config['log_directory'], 'slapproxy.log')
logger = logging.getLogger(__name__)
logger.debug('Configured slapproxy log to %r', slapproxy_log)
cls.software_url_list = cls.getSoftwareURLList()
cls.slapos_controler.initializeSlapOSControler(
slapproxy_log=slapproxy_log,
process_manager=cls._process_manager,
reset_software=False,
software_path_list=cls.software_url_list)
# XXX we should check *earlier* if that pidfile exists and if a supervisord
# process is still running, because if the developer already started supervisord
# (or it was left over by a bug) then another supervisord will start and
# starting the services a second time will fail.
cls._process_manager.supervisord_pid_file = os.path.join(
cls.slapos_controler.instance_root, 'var', 'run', 'supervisord.pid')
@classmethod
def runSoftwareRelease(cls):
"""Run all the software releases that were supplied before.
This is the equivalent of `slapos node software`.
The tests will be marked failed if the software build fails.
"""
logger = logging.getLogger()
logger.level = logging.DEBUG
stream = StringIO.StringIO()
stream_handler = logging.StreamHandler(stream)
logger.addHandler(stream_handler)
try:
cls.software_status_dict = cls.slapos_controler.runSoftwareRelease(
cls.config, environment=os.environ)
stream.seek(0)
stream.flush()
message = ''.join(stream.readlines()[-100:])
assert cls.software_status_dict['status_code'] == 0, message
finally:
logger.removeHandler(stream_handler)
del stream
@classmethod
def runComputerPartition(cls):
"""Instanciate the software.
This is the equivalent of doing:
slapos request --type=getInstanceSoftwareType --parameters=getInstanceParameterDict
slapos node instance
and return the slapos request instance parameters.
This can be called by tests to simulate a re-request with different parameters.
"""
logger = logging.getLogger()
logger.level = logging.DEBUG
stream = StringIO.StringIO()
stream_handler = logging.StreamHandler(stream)
logger.addHandler(stream_handler)
if cls.getInstanceSoftwareType() != 'default':
raise NotImplementedError
instance_parameter_dict = cls.getInstanceParameterDict()
try:
cls.instance_status_dict = cls.slapos_controler.runComputerPartition(
cls.config,
cluster_configuration=instance_parameter_dict,
environment=os.environ)
stream.seek(0)
stream.flush()
message = ''.join(stream.readlines()[-100:])
assert cls.instance_status_dict['status_code'] == 0, message
finally:
logger.removeHandler(stream_handler)
del stream
# FIXME: similar to test node, only one (root) partition is really
# supported for now.
computer_partition_list = []
for i in range(len(cls.software_url_list)):
computer_partition_list.append(
cls.slapos_controler.slap.registerOpenOrder().request(
cls.software_url_list[i],
# This is how testnode's SlapOSControler names created partitions
partition_reference='testing partition {i}'.format(
i=i, **cls.config),
partition_parameter_kw=instance_parameter_dict))
# expose some class attributes so that tests can use them:
# the ComputerPartition instances, to getInstanceParameterDict
cls.computer_partition = computer_partition_list[0]
# the path of the instance on the filesystem, for low level inspection
cls.computer_partition_root_path = os.path.join(
cls.config['working_directory'],
'inst',
cls.computer_partition.getId())
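As a usage note, a minimal hypothetical subclass of `SlapOSInstanceTestCase`, showing how the class attributes documented in the docstring above (`computer_partition_root_path` in particular) can be used; the software.cfg path is illustrative.
import os
import utils   # the module defined above

class ExampleInstanceTestCase(utils.SlapOSInstanceTestCase):
    @classmethod
    def getSoftwareURLList(cls):
        # illustrative path; a real test points at its own software.cfg
        return (os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'software.cfg')),)

    def test_partition_directory_exists(self):
        # computer_partition_root_path is the instance root on the filesystem
        self.assertTrue(os.path.isdir(self.computer_partition_root_path))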
...@@ -46,17 +46,42 @@ ...@@ -46,17 +46,42 @@
# #
# # The list of backends which apache should redirect to. # # The list of backends which apache should redirect to.
# "backend-list": [ # "backend-list": [
# # (port, unused, internal_scheme) # # (port, unused, internal_scheme, enable_authentication)
# (8000, _, "http://10.0.0.10:8001"), # (8000, _, "http://10.0.0.10:8001", True),
# (8002, _, "http://10.0.0.10:8003"), # (8002, _, "http://10.0.0.10:8003", False),
# ], # ],
#
# # The mapping of zope paths that this apache should redirect to.
# # This is a Zope-specific feature.
# # `enable_authentication` has the same meaning as for `backend-list`.
# "zope-virtualhost-monster-backend-dict": {
# # {(ip, port): ( enable_authentication, {frontend_path: ( internal_scheme ) }, ) }
# ('[::1]', 8004): (
# True, {
# 'zope-1': 'http://10.0.0.10:8001',
# 'zope-2': 'http://10.0.0.10:8002',
# },
# ),
# },
# } # }
# #
# This sample of `parameter_dict` will make apache listen to: # This sample of `parameter_dict` will make apache listen to:
# - 0.0.0.0:8000 redirecting internally to http://10.0.0.10:8001 # From `backend-list`:
# - [::1]:8000 redirecting internally to http://10.0.0.10:8001 # - 0.0.0.0:8000 redirecting internally to http://10.0.0.10:8001 and
# - 0.0.0.0:8002 redirecting internally to http://10.0.0.10:8003 # - [::1]:8000 redirecting internally to http://10.0.0.10:8001
# - [::1]:8002 redirecting internally to http://10.0.0.10:8003 # only accepting requests from clients who provide a valid SSL certificate trusted in `ca-cert`.
# - 0.0.0.0:8002 redirecting internally to http://10.0.0.10:8003
# - [::1]:8002 redirecting internally to http://10.0.0.10:8003
# accepting requests from any client.
#
# From `zope-virtualhost-monster-backend-dict`:
# - [::1]:8004 with some path-based rewrite rules redirecting to:
# * http://10.0.0.10:8001 when the path matches /zope-1(.*)
# * http://10.0.0.10:8002 when the path matches /zope-2(.*)
# with some VirtualHostMonster rewrite rules so that Zope writes URLs with
# [::1]:8004 as the server name.
# For more details, refer to
# https://docs.zope.org/zope2/zope2book/VirtualHosting.html#using-virtualhostroot-and-virtualhostbase-together
-#} -#}
LoadModule unixd_module modules/mod_unixd.so LoadModule unixd_module modules/mod_unixd.so
LoadModule access_compat_module modules/mod_access_compat.so LoadModule access_compat_module modules/mod_access_compat.so
......
...@@ -14,12 +14,12 @@ ...@@ -14,12 +14,12 @@
# not need these here). # not need these here).
[template-erp5] [template-erp5]
filename = instance-erp5.cfg.in filename = instance-erp5.cfg.in
md5sum = 92f1e522d8e7d0e9a9f7b4db63f092e4 md5sum = 6dfda47ff95a3fa768382d8002c8a780
[template-balancer] [template-balancer]
filename = instance-balancer.cfg.in filename = instance-balancer.cfg.in
md5sum = 697e43a020af5edee2151987abc8e7b4 md5sum = 3c7afbf160b943d2a10f527722792a23
[template-apache-backend-conf] [template-apache-backend-conf]
filename = apache-backend.conf.in filename = apache-backend.conf.in
md5sum = 516143f5e8a3032a7b7b82741d3a46b7 md5sum = e0a7b027cb52e5fa21ab64cfa7298f35
...@@ -98,6 +98,8 @@ ipv4 = {{ ipv4 }} ...@@ -98,6 +98,8 @@ ipv4 = {{ ipv4 }}
{% endif -%} {% endif -%}
{% set haproxy_dict = {} -%} {% set haproxy_dict = {} -%}
{% set apache_dict = {} -%} {% set apache_dict = {} -%}
{% set zope_virtualhost_monster_backend_dict = {} %}
{% set test_runner_url_dict = {} %} {# family_name => list of apache URLs #}
{% set next_port = itertools.count(slapparameter_dict['tcpv4-port']).next -%} {% set next_port = itertools.count(slapparameter_dict['tcpv4-port']).next -%}
{% for family_name, parameter_id_list in sorted( {% for family_name, parameter_id_list in sorted(
slapparameter_dict['zope-family-dict'].iteritems()) -%} slapparameter_dict['zope-family-dict'].iteritems()) -%}
...@@ -122,8 +124,27 @@ ipv6 = {{ zope_address.split(']:')[0][1:] }} ...@@ -122,8 +124,27 @@ ipv6 = {{ zope_address.split(']:')[0][1:] }}
{% set zope_effective_address = zope_address -%} {% set zope_effective_address = zope_address -%}
{% endif -%} {% endif -%}
{% do zope_family_address_list.append((zope_effective_address, maxconn, webdav)) -%} {% do zope_family_address_list.append((zope_effective_address, maxconn, webdav)) -%}
{# # Generate entries with rewrite rules for test runners #}
{% set test_runner_backend_mapping = {} %}
{% set test_runner_apache_url_list = [] %}
{% set test_runner_external_port = next_port() %}
{% for i, (test_runner_internal_ip, test_runner_internal_port) in
enumerate(slapparameter_dict[parameter_id ~ '-test-runner-address-list']) %}
{% do test_runner_backend_mapping.__setitem__(
'unit_test_' ~ i,
'http://' ~ test_runner_internal_ip ~ ':' ~ test_runner_internal_port ) %}
{% do test_runner_apache_url_list.append(
'https://' ~ ipv4 ~ ':' ~ test_runner_external_port ~ '/unit_test_' ~ i ~ '/' ) %}
{% endfor %}
{% do zope_virtualhost_monster_backend_dict.__setitem__(
(ipv4, test_runner_external_port),
( ssl_authentication, test_runner_backend_mapping ) ) -%}
{% do test_runner_url_dict.__setitem__(family_name, test_runner_apache_url_list) -%}
{% endfor -%} {% endfor -%}
{% endfor -%} {% endfor -%}
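For readability, the shapes of the structures built by the loop above, for one family with two test-runner backends (all IPs and ports are made up):
# Illustrative contents after the loop above, for one family with two
# test-runner backends; addresses and ports are made up.
test_runner_backend_mapping = {
    'unit_test_0': 'http://10.0.0.10:2201',
    'unit_test_1': 'http://10.0.0.10:2202',
}
test_runner_apache_url_list = [
    'https://10.0.0.2:2220/unit_test_0/',
    'https://10.0.0.2:2220/unit_test_1/',
]
zope_virtualhost_monster_backend_dict = {
    ('10.0.0.2', 2220): (True, test_runner_backend_mapping),  # (ssl_authentication, mapping)
}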
{# Make rendering fail artificially if any family has no known backend. {# Make rendering fail artificially if any family has no known backend.
# This is useful as haproxy's hot-reconfiguration mechanism is # This is useful as haproxy's hot-reconfiguration mechanism is
# supervisord-incompatible. # supervisord-incompatible.
...@@ -162,6 +183,7 @@ extensions = jinja2.ext.do ...@@ -162,6 +183,7 @@ extensions = jinja2.ext.do
recipe = slapos.cookbook:wrapper recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/haproxy wrapper-path = ${directory:services}/haproxy
command-line = "{{ parameter_dict['haproxy'] }}/sbin/haproxy" -f "${haproxy-cfg:rendered}" command-line = "{{ parameter_dict['haproxy'] }}/sbin/haproxy" -f "${haproxy-cfg:rendered}"
hash-files = ${haproxy-cfg:rendered}
{# TODO: build socat and wrap it as "${directory:bin}/haproxy-ctl" to connect to "${haproxy-cfg-parameter-dict:socket-path}" #} {# TODO: build socat and wrap it as "${directory:bin}/haproxy-ctl" to connect to "${haproxy-cfg-parameter-dict:socket-path}" #}
...@@ -173,6 +195,7 @@ crl = ${directory:apache-conf}/crl.pem ...@@ -173,6 +195,7 @@ crl = ${directory:apache-conf}/crl.pem
[apache-conf-parameter-dict] [apache-conf-parameter-dict]
backend-list = {{ dumps(apache_dict.values()) }} backend-list = {{ dumps(apache_dict.values()) }}
zope-virtualhost-monster-backend-dict = {{ dumps(zope_virtualhost_monster_backend_dict) }}
ip-list = {{ dumps(apache_ip_list) }} ip-list = {{ dumps(apache_ip_list) }}
pid-file = ${directory:run}/apache.pid pid-file = ${directory:run}/apache.pid
log-dir = ${directory:log} log-dir = ${directory:log}
...@@ -230,6 +253,10 @@ recipe = slapos.cookbook:publish.serialised ...@@ -230,6 +253,10 @@ recipe = slapos.cookbook:publish.serialised
{{ family_name ~ '-v6' }} = {% if ipv6_set %}{{ scheme ~ '://[' ~ ipv6 ~ ']:' ~ apache_port }}{% endif %} {{ family_name ~ '-v6' }} = {% if ipv6_set %}{{ scheme ~ '://[' ~ ipv6 ~ ']:' ~ apache_port }}{% endif %}
{{ family_name }} = {{ scheme ~ '://' ~ ipv4 ~ ':' ~ apache_port }} {{ family_name }} = {{ scheme ~ '://' ~ ipv4 ~ ':' ~ apache_port }}
{% endfor -%} {% endfor -%}
{% for family_name, test_runner_url_list in test_runner_url_dict.items() -%}
{{ family_name ~ '-test-runner-url-list' }} = {{ dumps(test_runner_url_list) }}
{% endfor -%}
monitor-base-url = ${monitor-publish-parameters:monitor-base-url} monitor-base-url = ${monitor-publish-parameters:monitor-base-url}
[apache-ssl] [apache-ssl]
...@@ -304,10 +331,19 @@ crl = ${:ca-dir}/crl ...@@ -304,10 +331,19 @@ crl = ${:ca-dir}/crl
apachedex = ${monitor-directory:private}/apachedex apachedex = ${monitor-directory:private}/apachedex
[{{ section('monitor-generate-apachedex-report') }}] [{{ section('monitor-generate-apachedex-report') }}]
recipe = slapos.cookbook:cron.d
cron-entries = ${cron:cron-entries}
name = generate-apachedex-report
# The goal is to run before logrotate's log rotation.
# Here, logrotate-entry-base:frequency = daily, so we run at 23:00 every day.
frequency = 0 23 * * *
command = ${monitor-generate-apachedex-report-wrapper:wrapper-path}
[monitor-generate-apachedex-report-wrapper]
recipe = slapos.cookbook:wrapper recipe = slapos.cookbook:wrapper
wrapper-path = ${monitor-directory:reports}/${:command} wrapper-path = ${directory:bin}/${:command}
command-line = "{{ parameter_dict['run-apachedex-location'] }}" "{{ parameter_dict['apachedex-location'] }}" "${directory:apachedex}" ${monitor-publish-parameters:monitor-base-url}/private/apachedex --apache-log-list "${apachedex-parameters:apache-log-list}" --configuration "${apachedex-parameters:configuration}" command-line = "{{ parameter_dict['run-apachedex-location'] }}" "{{ parameter_dict['apachedex-location'] }}" "${directory:apachedex}" ${monitor-publish-parameters:monitor-base-url}/private/apachedex --apache-log-list "${apachedex-parameters:apache-log-list}" --configuration "${apachedex-parameters:configuration}"
command = apachedex_every_23_hour command = generate-apachedex-report
[apachedex-parameters] [apachedex-parameters]
# XXX - Sample log file with current date: apache_access.log-%(date)s.gz # XXX - Sample log file with current date: apache_access.log-%(date)s.gz
...@@ -321,6 +357,11 @@ recipe = slapos.cookbook:wrapper ...@@ -321,6 +357,11 @@ recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:promise}/check-apachedex-result wrapper-path = ${directory:promise}/check-apachedex-result
command-line = "{{ parameter_dict['promise-check-apachedex-result'] }}" --apachedex_path "${directory:apachedex}" --status_file ${monitor-directory:private}/apachedex.report.json --threshold "${apachedex-parameters:promise-threshold}" command-line = "{{ parameter_dict['promise-check-apachedex-result'] }}" --apachedex_path "${directory:apachedex}" --status_file ${monitor-directory:private}/apachedex.report.json --threshold "${apachedex-parameters:promise-threshold}"
[{{ section('promise-check-computer-memory') }}]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:promise}/check-computer-memory
command-line = "{{ parameter_dict["check-computer-memory-binary"] }}" -db ${monitor-instance-parameter:collector-db} --threshold "{{ slapparameter_dict["computer-memory-percent-threshold"] }}" --unit percent
[monitor-instance-parameter] [monitor-instance-parameter]
monitor-httpd-ipv6 = {{ (ipv6_set | list)[0] }} monitor-httpd-ipv6 = {{ (ipv6_set | list)[0] }}
monitor-httpd-port = {{ next_port() }} monitor-httpd-port = {{ next_port() }}
......
...@@ -8,6 +8,16 @@ ...@@ -8,6 +8,16 @@
{% set jupyter_dict = slapparameter_dict.get('jupyter', {}) -%} {% set jupyter_dict = slapparameter_dict.get('jupyter', {}) -%}
{% set has_jupyter = jupyter_dict.get('enable', jupyter_enable_default.lower() in ('true', 'yes')) -%} {% set has_jupyter = jupyter_dict.get('enable', jupyter_enable_default.lower() in ('true', 'yes')) -%}
{% set jupyter_zope_family = jupyter_dict.get('zope-family', '') -%} {% set jupyter_zope_family = jupyter_dict.get('zope-family', '') -%}
{% set test_runner_enabled = slapparameter_dict.get('test-runner', {}).get('enabled', True) -%}
{% set test_runner_node_count = slapparameter_dict.get('test-runner', {}).get('node-count', 3) -%}
{% set test_runner_extra_database_count = slapparameter_dict.get('test-runner', {}).get('extra-database-count', 3) -%}
{% set test_runner_total_database_count = test_runner_node_count * (1 + test_runner_extra_database_count) -%}
{# Backward compatibility for mariadb.test-database-amount #}
{% set mariadb_test_database_amount = slapparameter_dict.get('mariadb', {}).get('test-database-amount') -%}
{% if mariadb_test_database_amount is not none -%}
{% set test_runner_total_database_count = mariadb_test_database_amount %}
{% set test_runner_enabled = mariadb_test_database_amount > 0 %}
{% endif -%}
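Restating the Jinja logic above in plain Python, with the template defaults, to make the resulting database count explicit:
# Template defaults: 3 test-runner nodes, 3 extra databases per node.
test_runner_enabled = True
node_count = 3
extra_database_count = 3
total_database_count = node_count * (1 + extra_database_count)   # 12 test databases

# Backward compatibility: an explicit mariadb.test-database-amount overrides the
# computed total, and a value of 0 disables the test runner.
mariadb_test_database_amount = None   # i.e. not provided
if mariadb_test_database_amount is not None:
    total_database_count = mariadb_test_database_amount
    test_runner_enabled = mariadb_test_database_amount > 0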
{% set monitor_base_url_dict = {} -%} {% set monitor_base_url_dict = {} -%}
{% set caucase_url = slapparameter_dict.get('caucase', {}).pop('url', '') -%} {% set caucase_url = slapparameter_dict.get('caucase', {}).pop('url', '') -%}
{% set monitor_dict = slapparameter_dict.get('monitor', {}) %} {% set monitor_dict = slapparameter_dict.get('monitor', {}) %}
...@@ -15,6 +25,7 @@ ...@@ -15,6 +25,7 @@
[request-common] [request-common]
<= request-common-base <= request-common-base
config-use-ipv6 = {{ dumps(slapparameter_dict.get('use-ipv6', False)) }} config-use-ipv6 = {{ dumps(slapparameter_dict.get('use-ipv6', False)) }}
config-computer-memory-percent-threshold = {{ dumps(monitor_dict.get('computer-memory-percent-threshold', 80)) }}
{% macro request(name, software_type, config_key, config, ret={'url': True}, key_config={}) -%} {% macro request(name, software_type, config_key, config, ret={'url': True}, key_config={}) -%}
{% do config.update(slapparameter_dict.get(config_key, {})) -%} {% do config.update(slapparameter_dict.get(config_key, {})) -%}
...@@ -44,7 +55,7 @@ config-name = {{ name }} ...@@ -44,7 +55,7 @@ config-name = {{ name }}
{{ request('memcached-persistent', 'kumofs', 'kumofs', {'tcpv4-port': 2000}, {'url': True, 'monitor-base-url': False}, key_config={'monitor-passwd': 'monitor-htpasswd:passwd'}) }} {{ request('memcached-persistent', 'kumofs', 'kumofs', {'tcpv4-port': 2000}, {'url': True, 'monitor-base-url': False}, key_config={'monitor-passwd': 'monitor-htpasswd:passwd'}) }}
{{ request('memcached-volatile', 'kumofs', 'memcached', {'tcpv4-port': 2010, 'ram-storage-size': 64}, {'url': True, 'monitor-base-url': False}, key_config={'monitor-passwd': 'monitor-htpasswd:passwd'}) }} {{ request('memcached-volatile', 'kumofs', 'memcached', {'tcpv4-port': 2010, 'ram-storage-size': 64}, {'url': True, 'monitor-base-url': False}, key_config={'monitor-passwd': 'monitor-htpasswd:passwd'}) }}
{{ request('mariadb', 'mariadb', 'mariadb', {'tcpv4-port': 2099, 'max-slowqueries-threshold': monitor_dict.get('max-slowqueries-threshold', 1000), 'slowest-query-threshold': monitor_dict.get('slowest-query-threshold', '') }, {'database-list': True, 'test-database-list': True, 'monitor-base-url': False}, key_config={'monitor-passwd': 'monitor-htpasswd:passwd'}) }} {{ request('mariadb', 'mariadb', 'mariadb', {'tcpv4-port': 2099, 'max-slowqueries-threshold': monitor_dict.get('max-slowqueries-threshold', 1000), 'slowest-query-threshold': monitor_dict.get('slowest-query-threshold', ''), 'test-database-amount': test_runner_total_database_count}, {'database-list': True, 'test-database-list': True, 'monitor-base-url': False}, key_config={'monitor-passwd': 'monitor-htpasswd:passwd'}) }}
{% if has_posftix -%} {% if has_posftix -%}
{{ request('smtp', 'postfix', 'smtp', {'tcpv4-port': 2025, 'smtpd-sasl-user': 'erp5@nowhere'}, key_config={'smtpd-sasl-password': 'publish-early:smtpd-sasl-password'}) }} {{ request('smtp', 'postfix', 'smtp', {'tcpv4-port': 2025, 'smtpd-sasl-user': 'erp5@nowhere'}, key_config={'smtpd-sasl-password': 'publish-early:smtpd-sasl-password'}) }}
{%- else %} {%- else %}
...@@ -93,45 +104,7 @@ connection-http-url = {{ caucase_url }} ...@@ -93,45 +104,7 @@ connection-http-url = {{ caucase_url }}
{% endif -%} {% endif -%}
{% endfor -%} {% endfor -%}
[publish-early] {% set zope_partition_dict = slapparameter_dict.get('zope-partition-dict', {'1': {}}) -%}
recipe = slapos.cookbook:publish-early
-init =
inituser-password gen-password:passwd
deadlock-debugger-password gen-deadlock-debugger-password:passwd
{%- if has_posftix %}
smtpd-sasl-password gen-smtpd-sasl-password:passwd
{%- endif %}
{%- if neo %}
neo-cluster gen-neo-cluster:name
{%- if neo[0] %}
neo-cluster = {{ dumps(neo[0]) }}
{%- endif %}
{%- endif %}
{%- set inituser_password = slapparameter_dict.get('inituser-password') %}
{%- if inituser_password %}
inituser-password = {{ dumps(inituser_password) }}
{%- endif %}
{%- set deadlock_debugger_password = slapparameter_dict.get('deadlock-debugger-password') -%}
{%- if deadlock_debugger_password %}
deadlock-debugger-password = {{ dumps(deadlock_debugger_password) }}
{%- endif %}
[gen-password]
recipe = slapos.cookbook:generate.password
storage-path =
[gen-deadlock-debugger-password]
<= gen-password
[gen-neo-cluster-base]
<= gen-password
[gen-neo-cluster]
name = neo-${gen-neo-cluster-base:passwd}
[gen-smtpd-sasl-password]
< = gen-password
{% set zope_partition_dict = slapparameter_dict.get('zope-partition-dict', {'1': {}}) -%} {% set zope_partition_dict = slapparameter_dict.get('zope-partition-dict', {'1': {}}) -%}
{% set zope_address_list_id_dict = {} -%} {% set zope_address_list_id_dict = {} -%}
{% if zope_partition_dict -%} {% if zope_partition_dict -%}
...@@ -142,6 +115,9 @@ return = ...@@ -142,6 +115,9 @@ return =
zope-address-list zope-address-list
hosts-dict hosts-dict
monitor-base-url monitor-base-url
{%- if test_runner_enabled %}
test-runner-address-list
{% endif %}
{% set bt5_default_list = 'erp5_full_text_myisam_catalog slapos_configurator' -%} {% set bt5_default_list = 'erp5_full_text_myisam_catalog slapos_configurator' -%}
{% if has_jupyter -%} {% if has_jupyter -%}
{% set bt5_default_list = bt5_default_list + ' erp5_data_notebook' -%} {% set bt5_default_list = bt5_default_list + ' erp5_data_notebook' -%}
...@@ -152,6 +128,7 @@ config-caucase-url = ${request-caucase:connection-http-url} ...@@ -152,6 +128,7 @@ config-caucase-url = ${request-caucase:connection-http-url}
config-cloudooo-url = {{ dumps(slapparameter_dict.get('cloudooo-url', default_cloudooo_url)) }} config-cloudooo-url = {{ dumps(slapparameter_dict.get('cloudooo-url', default_cloudooo_url)) }}
config-deadlock-debugger-password = ${publish-early:deadlock-debugger-password} config-deadlock-debugger-password = ${publish-early:deadlock-debugger-password}
config-developer-list = {{ dumps(slapparameter_dict.get('developer-list', [inituser_login])) }} config-developer-list = {{ dumps(slapparameter_dict.get('developer-list', [inituser_login])) }}
config-saucelabs-dict = {{ dumps(slapparameter_dict.get('saucelabs-dict', {})) }}
config-hosts-dict = {{ dumps(slapparameter_dict.get('hosts-dict', {})) }} config-hosts-dict = {{ dumps(slapparameter_dict.get('hosts-dict', {})) }}
config-hostalias-dict = {{ dumps(slapparameter_dict.get('hostalias-dict', {})) }} config-hostalias-dict = {{ dumps(slapparameter_dict.get('hostalias-dict', {})) }}
config-id-store-interval = {{ dumps(slapparameter_dict.get('id-store-interval')) }} config-id-store-interval = {{ dumps(slapparameter_dict.get('id-store-interval')) }}
...@@ -169,6 +146,8 @@ config-cloudooo-retry-count = {{ slapparameter_dict.get('cloudooo-retry-count', ...@@ -169,6 +146,8 @@ config-cloudooo-retry-count = {{ slapparameter_dict.get('cloudooo-retry-count',
config-wendelin-core-zblk-fmt = {{ dumps(slapparameter_dict.get('wendelin-core-zblk-fmt', '')) }} config-wendelin-core-zblk-fmt = {{ dumps(slapparameter_dict.get('wendelin-core-zblk-fmt', '')) }}
config-ca-path = ${directory:ca-dir} config-ca-path = ${directory:ca-dir}
config-zodb-dict = {{ dumps(zodb_dict) }} config-zodb-dict = {{ dumps(zodb_dict) }}
config-test-runner-enabled = {{ dumps(test_runner_enabled) }}
config-test-runner-node-count = {{ dumps(test_runner_node_count) }}
{% for server_type, server_dict in storage_dict.iteritems() -%} {% for server_type, server_dict in storage_dict.iteritems() -%}
{% if server_type == 'neo' -%} {% if server_type == 'neo' -%}
config-neo-cluster = ${publish-early:neo-cluster} config-neo-cluster = ${publish-early:neo-cluster}
...@@ -183,6 +162,7 @@ config-tidstorage-port = ${request-zodb:connection-tidstorage-port} ...@@ -183,6 +162,7 @@ config-tidstorage-port = ${request-zodb:connection-tidstorage-port}
software-type = zope software-type = zope
{% set zope_family_dict = {} -%} {% set zope_family_dict = {} -%}
{% set zope_family_name_list = [] -%}
{% set zope_backend_path_dict = {} -%} {% set zope_backend_path_dict = {} -%}
{% set ssl_authentication_dict = {} -%} {% set ssl_authentication_dict = {} -%}
{% set jupyter_zope_family_default = [] -%} {% set jupyter_zope_family_default = [] -%}
...@@ -190,6 +170,7 @@ software-type = zope ...@@ -190,6 +170,7 @@ software-type = zope
{% set partition_name = 'zope-' ~ custom_name -%} {% set partition_name = 'zope-' ~ custom_name -%}
{% set section_name = 'request-' ~ partition_name -%} {% set section_name = 'request-' ~ partition_name -%}
{% set zope_family = zope_parameter_dict.get('family', 'default') -%} {% set zope_family = zope_parameter_dict.get('family', 'default') -%}
{% do zope_family_name_list.append(zope_family) %}
{% set backend_path = zope_parameter_dict.get('backend-path', '/') % {'site-id': site_id} %} {% set backend_path = zope_parameter_dict.get('backend-path', '/') % {'site-id': site_id} %}
{# # default jupyter zope family is first zope family. -#} {# # default jupyter zope family is first zope family. -#}
{# # use list.append() to update it, because in jinja2 set changes only local scope. -#} {# # use list.append() to update it, because in jinja2 set changes only local scope. -#}
...@@ -214,6 +195,7 @@ config-longrequest-logger-timeout = {{ dumps(zope_parameter_dict.get('longreques ...@@ -214,6 +195,7 @@ config-longrequest-logger-timeout = {{ dumps(zope_parameter_dict.get('longreques
config-large-file-threshold = {{ dumps(zope_parameter_dict.get('large-file-threshold', "10MB")) }} config-large-file-threshold = {{ dumps(zope_parameter_dict.get('large-file-threshold', "10MB")) }}
config-port-base = {{ dumps(zope_parameter_dict.get('port-base', 2200)) }} config-port-base = {{ dumps(zope_parameter_dict.get('port-base', 2200)) }}
config-webdav = {{ dumps(zope_parameter_dict.get('webdav', False)) }} config-webdav = {{ dumps(zope_parameter_dict.get('webdav', False)) }}
config-test-runner-apache-url-list = ${publish-early:{{ zope_family }}-test-runner-url-list}
{% endfor -%} {% endfor -%}
{# if not explicitly configured, connect jupyter to first zope family, which -#} {# if not explicitly configured, connect jupyter to first zope family, which -#}
...@@ -309,6 +291,9 @@ return = ...@@ -309,6 +291,9 @@ return =
{%- for family in zope_family_dict %} {%- for family in zope_family_dict %}
{{ family }} {{ family }}
{{ family }}-v6 {{ family }}-v6
{% if test_runner_enabled %}
{{ family }}-test-runner-url-list
{% endif %}
{% endfor -%} {% endfor -%}
{% do monitor_base_url_dict.__setitem__('request-balancer', '${' ~ 'request-balancer' ~ ':connection-monitor-base-url}') -%} {% do monitor_base_url_dict.__setitem__('request-balancer', '${' ~ 'request-balancer' ~ ':connection-monitor-base-url}') -%}
...@@ -316,6 +301,9 @@ config-zope-family-dict = {{ dumps(zope_family_parameter_dict) }} ...@@ -316,6 +301,9 @@ config-zope-family-dict = {{ dumps(zope_family_parameter_dict) }}
config-tcpv4-port = {{ dumps(balancer_dict.get('tcpv4-port', 2150)) }} config-tcpv4-port = {{ dumps(balancer_dict.get('tcpv4-port', 2150)) }}
{% for zope_section_id, name in zope_address_list_id_dict.items() -%} {% for zope_section_id, name in zope_address_list_id_dict.items() -%}
config-{{ name }} = {{ ' ${' ~ zope_section_id ~ ':connection-zope-address-list}' }} config-{{ name }} = {{ ' ${' ~ zope_section_id ~ ':connection-zope-address-list}' }}
{% if test_runner_enabled -%}
config-{{ name }}-test-runner-address-list = {{ ' ${' ~ zope_section_id ~ ':connection-test-runner-address-list}' }}
{% endif -%}
{% endfor -%} {% endfor -%}
# XXX: should those really be same for all families ? # XXX: should those really be same for all families ?
config-haproxy-server-check-path = {{ dumps(balancer_dict.get('haproxy-server-check-path', '/') % {'site-id': site_id}) }} config-haproxy-server-check-path = {{ dumps(balancer_dict.get('haproxy-server-check-path', '/') % {'site-id': site_id}) }}
...@@ -377,6 +365,57 @@ hosts-dict = {{ '${' ~ zope_address_list_id_dict.keys()[0] ~ ':connection-hosts- ...@@ -377,6 +365,57 @@ hosts-dict = {{ '${' ~ zope_address_list_id_dict.keys()[0] ~ ':connection-hosts-
{% for name, value in publish_dict.items() -%} {% for name, value in publish_dict.items() -%}
{{ name }} = {{ value }} {{ name }} = {{ value }}
{% endfor -%} {% endfor -%}
{% for zope_family_name in zope_family_name_list -%}
{{ zope_family_name }}-test-runner-url-list = ${request-balancer:connection-{{ zope_family_name }}-test-runner-url-list}
{% endfor -%}
[publish-early]
recipe = slapos.cookbook:publish-early
-init =
inituser-password gen-password:passwd
deadlock-debugger-password gen-deadlock-debugger-password:passwd
{%- if has_posftix %}
smtpd-sasl-password gen-smtpd-sasl-password:passwd
{%- endif %}
{% for zope_family_name in zope_family_name_list %}
{{ zope_family_name }}-test-runner-url-list default-balancer-test-runner-url-list:default
{% endfor -%}
{%- if neo %}
neo-cluster gen-neo-cluster:name
{%- if neo[0] %}
neo-cluster = {{ dumps(neo[0]) }}
{%- endif %}
{%- endif %}
{%- set inituser_password = slapparameter_dict.get('inituser-password') %}
{%- if inituser_password %}
inituser-password = {{ dumps(inituser_password) }}
{%- endif %}
{%- set deadlock_debugger_password = slapparameter_dict.get('deadlock-debugger-password') -%}
{%- if deadlock_debugger_password %}
deadlock-debugger-password = {{ dumps(deadlock_debugger_password) }}
{%- endif %}
[default-balancer-test-runner-url-list]
recipe =
default = not-ready
[gen-password]
recipe = slapos.cookbook:generate.password
storage-path =
[gen-deadlock-debugger-password]
<= gen-password
[gen-neo-cluster-base]
<= gen-password
[gen-neo-cluster]
name = neo-${gen-neo-cluster-base:passwd}
[gen-smtpd-sasl-password]
< = gen-password
[monitor-instance-parameter] [monitor-instance-parameter]
monitor-httpd-port = 8386 monitor-httpd-port = 8386
......
...@@ -18,11 +18,11 @@ md5sum = 713db528880282d568278f09458d2aab ...@@ -18,11 +18,11 @@ md5sum = 713db528880282d568278f09458d2aab
[template-runner] [template-runner]
filename = instance-runner.cfg filename = instance-runner.cfg
md5sum = cefbc8abef234c9a74f88688a0ac0030 md5sum = e12255a8c946b3eb8c6373fff481339f
[template-runner-import-script] [template-runner-import-script]
filename = template/runner-import.sh.jinja2 filename = template/runner-import.sh.jinja2
md5sum = 92ac3f6982dec97e3b2df90f97111bd3 md5sum = ed2e08c07a6727b2012f15da67c0705d
[instance-runner-import] [instance-runner-import]
filename = instance-runner-import.cfg.in filename = instance-runner-import.cfg.in
...@@ -30,7 +30,7 @@ md5sum = 7a879739afe55320ee96409bcc8a52ab ...@@ -30,7 +30,7 @@ md5sum = 7a879739afe55320ee96409bcc8a52ab
[template-runner-export-script] [template-runner-export-script]
filename = template/runner-export.sh.jinja2 filename = template/runner-export.sh.jinja2
md5sum = 5877e70b2bd5cfe06aff793125f65d6a md5sum = 231f9b74862f8991f54326511e76f5ec
[instance-runner-export] [instance-runner-export]
filename = instance-runner-export.cfg.in filename = instance-runner-export.cfg.in
......
...@@ -86,8 +86,24 @@ check-secure = 1 ...@@ -86,8 +86,24 @@ check-secure = 1
dash_path = {{ dash_executable_location }} dash_path = {{ dash_executable_location }}
curl_path = {{ curl_executable_location }} curl_path = {{ curl_executable_location }}
[custom-frontend-url-ready-promise]
recipe = slapos.recipe.template:jinja2
path = $${directory:promises}/custom_frontend_ready_promise
url = https://$${request-custom-frontend:connection-domain}
rendered = $${directory:promises}/custom_frontend_ready_promise
template = inline:
#!{{ dash_executable_location }}
URL="$${:url}"
CODE=$({{ curl_executable_location }} -g -k -sL $URL -w %{http_code} --max-time 5 -o /dev/null)
if [ $? -eq 3 ]; then
echo "Custom frontend URL malformed: $URL." >&2
exit 1
fi
[publish-connection-information] [publish-connection-information]
custom-frontend-url = https://$${request-custom-frontend:connection-domain} custom-frontend-url = $${custom-frontend-url-ready-promise:url}
{% endif %} {% endif %}
# Create all needed directories # Create all needed directories
......
...@@ -56,7 +56,12 @@ if 1: ...@@ -56,7 +56,12 @@ if 1:
print("*.pid") print("*.pid")
print(".installed*.cfg") print(".installed*.cfg")
for partition in glob.glob(path + "/instance/slappart*"): for partition in glob.glob(path + "/instance/slappart*"):
os.chdir(partition) try:
os.chdir(partition)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
try: try:
with open("srv/exporter.exclude") as f: with open("srv/exporter.exclude") as f:
exclude = f.readlines() exclude = f.readlines()
...@@ -104,34 +109,32 @@ fi ...@@ -104,34 +109,32 @@ fi
# Check that export didn't happen during backup of instances # Check that export didn't happen during backup of instances
tmp_backup_sum=$(mktemp -p "$tmp_directory") tmp_backup_sum=$(mktemp -p "$tmp_directory")
tmp_filtered_signature=$(mktemp -p "$tmp_directory")
remove_tmp_files () { remove_tmp_files () {
rm "$tmp_backup_sum" "$tmp_filtered_signature" rm "$tmp_backup_sum"
} }
trap remove_tmp_files EXIT trap remove_tmp_files EXIT
cd {{ directory['backup'] }}
# Wait a little to increase the probability of detecting an ongoing backup.
sleep 5
# Getting files from runner backup directory, as instance backup files may be # Getting files from runner backup directory, as instance backup files may be
# explicitly excluded from the backup, using the srv/exporter.exclude # explicitly excluded from the backup, using the srv/exporter.exclude
backup_directory_path="$tmp_directory/backup_files.txt" find -path "./runner/instance/slappart*/srv/backup/*" -type f ! -name backup.signature -print0 |
cd {{ directory['backup'] }} xargs -r0 sha256sum | sort -k 66 > "$tmp_backup_sum"
find . -path "./runner/instance/slappart*/srv/backup/*" -type f -print0 > $backup_directory_path
# If no backup found, it's over # If no backup found, it's over
if [ ! -s "$backup_directory_path" ]; then if [ ! -s "$tmp_backup_sum" ]; then
exit 0 exit 0
fi fi
sleep 5
cat $backup_directory_path | xargs -0 sha256sum | sort -k 66 > "$tmp_backup_sum"
rm $backup_directory_path
egrep "instance/slappart.*/srv/backup/" "$backup_directory/backup.signature" > "$tmp_filtered_signature"
# If the diff fails, then the notifier will restart this script # If the diff fails, then the notifier will restart this script
if diff "$tmp_backup_sum" "$tmp_filtered_signature"; then grep "instance/slappart.*/srv/backup/" "$backup_directory/backup.signature" |
exit 0 diff "$tmp_backup_sum" - || {
fi echo "ERROR: Some backups are not consistent, exporter should be re-run."
echo "ERROR: Some backups are not consistent, exporter should be re-run." echo "Let's sleep {{ backup_wait_time }} minutes, to let the backup end..."
echo "Let's sleep {{ backup_wait_time }} minutes, to let the backup end..." sleep {{ backup_wait_time }}m
sleep {{ backup_wait_time }}m exit 1
exit 1 }
...@@ -69,7 +69,12 @@ if 1: ...@@ -69,7 +69,12 @@ if 1:
print("*.pid") print("*.pid")
print(".installed*.cfg") print(".installed*.cfg")
for partition in glob.glob(path + "/instance/slappart*"): for partition in glob.glob(path + "/instance/slappart*"):
os.chdir(partition) try:
os.chdir(partition)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
try: try:
with open("srv/exporter.exclude") as f: with open("srv/exporter.exclude") as f:
exclude = f.readlines() exclude = f.readlines()
...@@ -149,7 +154,19 @@ fi ...@@ -149,7 +154,19 @@ fi
# XXX hardcoded # XXX hardcoded
PARTITION=$(basename $HOME) PARTITION=$(basename $HOME)
OLD_SOFTWARE_RELEASE=$(db_query "select software_release from partition11 where reference='slappart0';") OLD_SOFTWARE_RELEASE=$(db_query "select software_release from partition11 where reference='slappart0';")
SOFTWARE_RELEASE=$(echo "$OLD_SOFTWARE_RELEASE" | sed 's/\/\(slappart\|test0-\)[0-9][0-9]*\//\/'"$PARTITION"'\//') SOFTWARE_RELEASE=$({{ sys.executable }} - $OLD_SOFTWARE_RELEASE $PARTITION <<EOF
if 1:
import os, re, sys
# We want to replace the last occurrence only
old_software_release, partition = sys.argv[1], sys.argv[2]
for match in re.finditer("(slappart|test0-)[0-9][0-9]*", old_software_release):
start, end = match.start(), match.end()
print(old_software_release[:start] + partition + old_software_release[end:])
EOF
)
db_query "update partition11 set software_release='$SOFTWARE_RELEASE' where software_release NOT NULL;" db_query "update partition11 set software_release='$SOFTWARE_RELEASE' where software_release NOT NULL;"
db_query "update software11 set url='$SOFTWARE_RELEASE' where url='$OLD_SOFTWARE_RELEASE';" || db_query "delete from software11 where url='$OLD_SOFTWARE_RELEASE';" db_query "update software11 set url='$SOFTWARE_RELEASE' where url='$OLD_SOFTWARE_RELEASE';" || db_query "delete from software11 where url='$OLD_SOFTWARE_RELEASE';"
# Change slapproxy database to have all instances stopped # Change slapproxy database to have all instances stopped
......
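The embedded Python in the hunk above replaces the partition component of the old software release path. Its effect can be tried in isolation; every value below is hypothetical:

import re

old_software_release = "/srv/slapgrid/slappart12/srv/runner/project/slapos/software.cfg"
partition = "slappart3"

# Substitute the last "slappartN" / "test0-N" component, which is what the
# "replace the last occurrence only" comment aims at.
matches = list(re.finditer(r"(slappart|test0-)[0-9]+", old_software_release))
if matches:
    start, end = matches[-1].span()
    print(old_software_release[:start] + partition + old_software_release[end:])
    # -> /srv/slapgrid/slappart3/srv/runner/project/slapos/software.cfg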
...@@ -712,7 +712,7 @@ oauth2client = 4.0.0 ...@@ -712,7 +712,7 @@ oauth2client = 4.0.0
objgraph = 3.1.0 objgraph = 3.1.0
ply = 3.10 ply = 3.10
polib = 1.0.8 polib = 1.0.8
pprofile = 2.0.0 pprofile = 2.0.1
pyasn1 = 0.2.3 pyasn1 = 0.2.3
pyasn1-modules = 0.0.8 pyasn1-modules = 0.0.8
pycountry = 17.1.8 pycountry = 17.1.8
......
...@@ -22,4 +22,4 @@ md5sum = 117e46af6d9d31c09eeb86089d11407e ...@@ -22,4 +22,4 @@ md5sum = 117e46af6d9d31c09eeb86089d11407e
[template-logrotate-base] [template-logrotate-base]
filename = instance-logrotate-base.cfg.in filename = instance-logrotate-base.cfg.in
md5sum = 9f44fc5ee22c6662297b41f71fa11b7c md5sum = f56b86a0742afff931a5e972114566e4
[buildout] [buildout]
parts = parts =
cron-service
cron-entry-logrotate cron-entry-logrotate
logrotate-entry-cron logrotate-entry-cron
...@@ -10,7 +11,16 @@ dcrond-binary = {{ dcron_location }}/sbin/crond ...@@ -10,7 +11,16 @@ dcrond-binary = {{ dcron_location }}/sbin/crond
crontabs = ${logrotate-directory:crontabs} crontabs = ${logrotate-directory:crontabs}
cronstamps = ${logrotate-directory:cronstamps} cronstamps = ${logrotate-directory:cronstamps}
catcher = ${cron-simplelogger:wrapper} catcher = ${cron-simplelogger:wrapper}
binary = ${logrotate-directory:services}/crond binary = ${logrotate-directory:bin}/crond
# This is kept for backward compatibility: the cron section is still
# expected to create the service and to hold its variables here
service = ${cron-service:wrapper-path}
[cron-service]
recipe = slapos.cookbook:wrapper
command-line = ${cron:binary}
wrapper-path = ${logrotate-directory:services}/crond
hash-files = ${buildout:directory}/software_release/buildout.cfg
[cron-simplelogger] [cron-simplelogger]
recipe = slapos.cookbook:simplelogger recipe = slapos.cookbook:simplelogger
...@@ -51,7 +61,7 @@ post = ...@@ -51,7 +61,7 @@ post =
pre = pre =
frequency = daily frequency = daily
rotate-num = 3650 rotate-num = 3650
nocompress = nocompress =
[logrotate] [logrotate]
recipe = slapos.cookbook:wrapper recipe = slapos.cookbook:wrapper
......
...@@ -2,7 +2,7 @@ Monitor stack ...@@ -2,7 +2,7 @@ Monitor stack
============= =============
* The purpose of this stack is to know whether all promises, services and custom monitoring scripts went/are OK. * The purpose of this stack is to know whether all promises, services and custom monitoring scripts went/are OK.
* It also provides a web interface, to see which promises, instance and hosting subscription status. It also provide a rss feed to easily know the actual state of your instance, and to know when it started to went bad. You can also add your own monitoring scripts. * It also provides a web interface to see the status of promises, instances and hosting subscriptions. It also provides an RSS feed to easily know the current state of your instance, and to know when it started to go bad. You can also add your own monitoring scripts.
Implementation: Implementation:
...@@ -69,13 +69,13 @@ You don't need to define all parameters, you can only set what is required to be ...@@ -69,13 +69,13 @@ You don't need to define all parameters, you can only set what is required to be
If you have sub-instances, you should collect the base monitor URL from all instances that have a monitor and send it to monitor-url-list, or you can override the "monitor-base-url-dict" section and add all the URLs as key/value pairs in the root instance. If you have sub-instances, you should collect the base monitor URL from all instances that have a monitor and send it to monitor-url-list, or you can override the "monitor-base-url-dict" section and add all the URLs as key/value pairs in the root instance.
[monitor-base-url-list] [monitor-base-url-dict]
monitor1-url = https://[xxxx:xxx:xxxx:e:11::1fb1]:4200 monitor1-url = https://[xxxx:xxx:xxxx:e:11::1fb1]:4200
monitor2-url = https://[xxxx:xxx:xxxx:e:22::2fb2]:4200 monitor2-url = https://[xxxx:xxx:xxxx:e:22::2fb2]:4200
.. ..
.. ..
Also, All monitors of the sub instances need to have same password as the password of the root instance monitor. Also, all monitors of the sub-instances need to have the same password as the root instance monitor.
NB: You should use a double $ (e.g. $${monitor-template:rendered}) instead of a single $ in your instance template file if it's not a Jinja template. See: NB: You should use a double $ (e.g. $${monitor-template:rendered}) instead of a single $ in your instance template file if it's not a Jinja template. See:
- Jinja template file example, which uses a single $: https://lab.nexedi.com/nexedi/slapos/blob/master/software/slaprunner/instance-resilient-test.cfg.jinja2
...@@ -89,9 +89,9 @@ To learn how to write a promise in SlapOS, please read this document: ...@@ -89,9 +89,9 @@ To learn how to write a promise in SlapOS, please read this document:
https://www.erp5.com/slapos-TechnicalNote.General.SlapOS.Monitoring.Specifications https://www.erp5.com/slapos-TechnicalNote.General.SlapOS.Monitoring.Specifications
Writing a promise consists of defining a class called RunPromise which inherits from the GenericPromise class and defines the methods anomaly(), sense() and test(). Python promises should be placed in the folder etc/plugin of the computer partition. Writing a promise consists of defining a class called RunPromise which inherits from the GenericPromise class and defines the methods anomaly(), sense() and test(). Python promises should be placed in the folder etc/plugin of the computer partition.
New promises should be placed into the folder etc/plugin, legacy promise are into the folder etc/promise. Legacy promises are bash or other executable promises script which does not use GenericPromise class. New promises should be placed in the folder etc/plugin, while legacy promises live in the folder etc/promise. Legacy promises are bash or other executable promise scripts which do not use the GenericPromise class.
You will use slapos.cookbook:promise.plugin to generate your promise script into `etc/plugin` directory. Add promise will look like this: You will use slapos.cookbook:promise.plugin to generate your promise script in the `etc/plugin` directory. Adding a promise will look like this:
[promise-check-site] [promise-check-site]
recipe = slapos.cookbook:promise.plugin recipe = slapos.cookbook:promise.plugin
...@@ -107,8 +107,7 @@ You will use slapos.cookbook:promise.plugin to generate your promise script into ...@@ -107,8 +107,7 @@ You will use slapos.cookbook:promise.plugin to generate your promise script into
Then you will have to add the `promise-check-site` section to the buildout parts, so it will be installed. Then you will have to add the `promise-check-site` section to the buildout parts, so it will be installed.
In your promise code, you will be able to call `self.getConfig('site-url')`, `self.getConfig('connection-timeout')` and `self.getConfig('foo')`. The In your promise code, you will be able to call `self.getConfig('site-url')`, `self.getConfig('connection-timeout')` and `self.getConfig('foo')`. The returned value is `None` if the config parameter is not set.
returned value is `None` if the config parameter is not set.
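To make this concrete, here is a minimal, hedged sketch of such a promise plugin; the class layout follows the RunPromise/GenericPromise pattern described above, while the site-url check body and the chosen periodicity are only illustrative:

from zope.interface import implementer
from slapos.grid.promise import interface
from slapos.grid.promise.generic import GenericPromise


@implementer(interface.IPromise)
class RunPromise(GenericPromise):

  def __init__(self, config):
    super(RunPromise, self).__init__(config)
    # Run the anomaly check every 2 minutes (illustrative value).
    self.setPeriodicity(minute=2)

  def sense(self):
    # getConfig() returns None when the parameter is not set.
    url = self.getConfig('site-url')
    if url is None:
      self.logger.error("site-url parameter is not set")
      return
    # ... perform the real check here, e.g. an HTTP request bounded by
    # self.getConfig('connection-timeout'), then log the outcome.
    self.logger.info("%s is reachable", url)

  def test(self):
    # Result reported when the partition is processed.
    return self._test(result_count=1, failure_amount=1)

  def anomaly(self):
    # Periodic check; tolerate transient failures before raising an anomaly.
    return self._anomaly(result_count=3, failure_amount=3)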
Slapgrid will run each promise every time a partition is processed (every minute in theory); if the partition is up to date, slapgrid will only run the promises' anomaly check and save the result in a JSON file. Here is an example of a promise result: Slapgrid will run each promise every time a partition is processed (every minute in theory); if the partition is up to date, slapgrid will only run the promises' anomaly check and save the result in a JSON file. Here is an example of a promise result:
...@@ -197,8 +196,7 @@ To publish configuration URL in your instance.cfg, you can do like this: ...@@ -197,8 +196,7 @@ To publish configuration URL in your instance.cfg, you can do like this:
Send parameters to monitor interface Send parameters to monitor interface
------------------------------------ ------------------------------------
Monitor has a paramters called "instance-configuration" from the section [monitor-instance-parameter] Monitor has a parameter called "instance-configuration" in the [monitor-instance-parameter] section that can be updated to specify which parameters will be displayed on the monitor web interface.
that can be updated to specify which parameters will be deplayed on monitor web interface.
Parameters can be edited (except raw parameters) directly from the monitor interface. The change will be written to the related file. Here are some examples: Parameters can be edited (except raw parameters) directly from the monitor interface. The change will be written to the related file. Here are some examples:
......
...@@ -14,4 +14,4 @@ ...@@ -14,4 +14,4 @@
# not need these here). # not need these here).
[monitor2-template] [monitor2-template]
filename = instance-monitor.cfg.jinja2.in filename = instance-monitor.cfg.jinja2.in
md5sum = a2da18b7e1d4ae1332e19e5d1b70fcdf md5sum = 2d92f6c50569acbedbc075a84fadbed0
...@@ -65,7 +65,13 @@ recipe = slapos.cookbook:certificate_authority.request ...@@ -65,7 +65,13 @@ recipe = slapos.cookbook:certificate_authority.request
key-file = ${monitor-httpd-conf-parameter:key-file} key-file = ${monitor-httpd-conf-parameter:key-file}
cert-file = ${monitor-httpd-conf-parameter:cert-file} cert-file = ${monitor-httpd-conf-parameter:cert-file}
executable = ${monitor-httpd-wrapper:wrapper-path} executable = ${monitor-httpd-wrapper:wrapper-path}
wrapper = ${directory:services}/monitor-httpd wrapper = ${directory:bin}/monitor-httpd
[ca-monitor-httpd-service]
recipe = slapos.cookbook:wrapper
command-line = ${ca-monitor-httpd:wrapper}
wrapper-path = ${directory:services}/monitor-httpd
hash-files = ${buildout:directory}/software_release/buildout.cfg
[monitor-conf-parameters] [monitor-conf-parameters]
title = ${monitor-instance-parameter:monitor-title} title = ${monitor-instance-parameter:monitor-title}
...@@ -403,7 +409,7 @@ depends = ...@@ -403,7 +409,7 @@ depends =
${certificate-authority:wrapper} ${certificate-authority:wrapper}
${monitor-conf:rendered} ${monitor-conf:rendered}
${start-monitor:wrapper-path} ${start-monitor:wrapper-path}
${ca-monitor-httpd:wrapper} ${ca-monitor-httpd-service:wrapper-path}
${monitor-httpd-promise:filename} ${monitor-httpd-promise:filename}
${monitor-bootstrap-promise:file} ${monitor-bootstrap-promise:file}
${monitor-symlink:recipe} ${monitor-symlink:recipe}
......