Commit 89ec7583 authored by Thomas Gambier 🚴🏼

Update Release Candidate

parents 6e110f27 12c244e5
......@@ -68,7 +68,7 @@ shared = false
[apache-php]
recipe = slapos.recipe.cmmi
url = https://www.php.net/distributions/php-8.2.24.tar.xz
md5sum = 3263dbd4846871dd6fabe042f141eb19
md5sum = fff29ce84f5b4ddfc2063f7b2021fce2
configure-options =
--disable-static
--disable-zend-test
......
......@@ -14,7 +14,8 @@ revision = v1.12.1-nxd3
[babeld]
recipe = slapos.recipe.cmmi
path = ${babeld-repository:location}
make-options = CC='gcc -std=gnu99'
# Fedora's redhat-hardened-ld forces us to use either -fPIC or -fPIE
make-options = CC='gcc -std=gnu99 -fPIE'
configure-command =
echo "No configure.."
environment =
......
[buildout]
extends =
../gnu-config/buildout.cfg
parts =
chrpath
......@@ -6,3 +8,4 @@ parts =
recipe = slapos.recipe.cmmi
url = http://http.debian.net/debian/pool/main/c/chrpath/chrpath_0.16.orig.tar.gz
md5sum = 2bf8d1d1ee345fc8a7915576f5649982
pre-configure = cp -f ${gnu-config:location}/config.sub ${gnu-config:location}/config.guess .
[buildout]
extends =
../numpy/buildout.cfg
../cmake/buildout.cfg
../curl/buildout.cfg
../geos/buildout.cfg
../giflib/buildout.cfg
../jasper/buildout.cfg
../libexpat/buildout.cfg
../libjpeg/buildout.cfg
../libtiff/buildout.cfg
../libxml2/buildout.cfg
../openjpeg/buildout.cfg
../pcre/buildout.cfg
../proj4/buildout.cfg
../pkgconfig/buildout.cfg
../proj/buildout.cfg
../sqlite3/buildout.cfg
../xz-utils/buildout.cfg
......@@ -16,27 +21,30 @@ parts =
[gdal]
recipe = slapos.recipe.cmmi
version = 1.11.1
shared = true
version = 3.2.3
url = http://download.osgeo.org/gdal/${:version}/gdal-${:version}.tar.xz
md5sum = 2e126d7c6605691d38f3e71b945f5c73
md5sum = 6c276978d625d23a091bac9fdddb99db
location = @@LOCATION@@
configure-options =
--with-curl=${curl:location}/bin/curl-config
--with-expat=${libexpat:location}
--with-geos=${geos:location}/bin/geos-config
--with-gif=${giflib:location}
--with-jasper=${jasper:location}
--with-openjpeg=${openjpeg:location}
--with-jpeg=${libjpeg:location}
--with-libtiff=${libtiff:location}
--with-libz=${zlib:location}
--with-png=${libpng:location}
--with-static-proj4=${proj4:location}
--with-proj=${proj:location}
--with-sqlite3=${sqlite3:location}
--with-xml2=${libxml2:location}/bin/xml2-config
--with-xml2=yes
--without-webp
environment =
PATH=${xz-utils:location}/bin:%(PATH)s
PATH=${pkgconfig:location}/bin:${xz-utils:location}/bin:%(PATH)s
CPPFLAGS=-I${pcre:location}/include
LDFLAGS=-L${pcre:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib -Wl,-rpath=${curl:location}/lib -Wl,-rpath=${geos:location}/lib -Wl,-rpath=${giflib:location}/lib -Wl,-rpath=${jasper:location}/lib -Wl,-rpath=${jbigkit:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${libjpeg:location}/lib -Wl,-rpath=${libpng:location}/lib -Wl,-rpath=${libtiff:location}/lib -Wl,-rpath=${libxml2:location}/lib -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${sqlite3:location}/lib -Wl,-rpath=${zlib:location}/lib
LDFLAGS=-L${pcre:location}/lib -Wl,-rpath=${:location}/lib -Wl,-rpath=${proj:location}/lib -Wl,-rpath=${curl:location}/lib -Wl,-rpath=${geos:location}/lib -Wl,-rpath=${giflib:location}/lib -Wl,-rpath=${openjpeg:location}/lib -Wl,-rpath=${jbigkit:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${libjpeg:location}/lib -Wl,-rpath=${libpng:location}/lib -Wl,-rpath=${libtiff:location}/lib -Wl,-rpath=${libxml2:location}/lib -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${sqlite3:location}/lib -Wl,-rpath=${zlib:location}/lib
PKG_CONFIG_PATH=${libxml2:location}/lib/pkgconfig
[gdal-python]
recipe = zc.recipe.egg:custom
......
# SlapOS software release to test GDAL on Nexedi testing infrastructure.
[buildout]
extends =
../../stack/slapos-py2.cfg
buildout.cfg
parts =
gdal-interpreter
[gdal-interpreter]
recipe = zc.recipe.egg
interpreter = python-gdal
eggs =
${gdal-python:egg}
Tests for the GDAL component
[project]
name = "slapos.test.gdal"
version = "0.0.1.dev0"
description = "Test for SlapOS' GDAL component."
readme = "README.md"
requires-python = ">=3"
license = {text = "GNU General Public License version 3 or later"}
keywords = [
"SlapOS",
"testing",
"GDAL",
]
authors = [
{name = "Nexedi"},
]
maintainers = [
{name = "Nexedi", email = "info@nexedi.com"},
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Software Development :: Testing",
"Typing :: Typed",
]
dependencies = [
"slapos.core",
"slapos.libnetworkcache",
]
[project.urls]
homepage = "https://slapos.nexedi.com"
documentation = "https://slapos.nexedi.com"
repository = "https://lab.nexedi.com/nexedi/slapos"
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[tool.setuptools]
zip-safe = true
\ No newline at end of file
##############################################################################
#
# Copyright (c) 2024 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# pyright: strict
from pathlib import Path
import subprocess
from slapos.grid.utils import md5digest
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
Path(__file__).parent.parent / "test.cfg"
)
class GDALTestCase(SlapOSInstanceTestCase):
"""Tests for the GDAL component."""
request_instance = False
def test_noinstance(self):
"""Test that no SlapOS instance is being created for this component."""
self.assertFalse(hasattr(self, "computer_partition"))
def test_import(self):
"""Test that the GDAL Python module is importable."""
software_path = (
Path(self.slap.software_directory)
/ md5digest(self.getSoftwareURL())
)
self.assertTrue(software_path.exists())
self.assertTrue(software_path.is_dir())
bin_path = software_path / "bin"
self.assertTrue(bin_path.exists())
self.assertTrue(bin_path.is_dir())
python_exe = bin_path / "python-gdal"
self.assertTrue(python_exe.exists())
self.assertTrue(python_exe.is_file())
subprocess.check_call([python_exe, "-c", "import osgeo.gdal"])
[buildout]
extends =
../cmake/buildout.cfg
parts =
geos
[geos]
recipe = slapos.recipe.cmmi
version = 3.4.2
shared = true
version = 3.12.2
url = http://download.osgeo.org/geos/geos-${:version}.tar.bz2
md5sum = fc5df2d926eb7e67f988a43a92683bae
md5sum = f451aa3884f2ca19ae555f5c7d8de4f8
location = @@LOCATION@@
configure-command = ${cmake:location}/bin/cmake
configure-options =
--disable-dependency-tracking
--disable-static
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_INSTALL_PREFIX=@@LOCATION@@
-DCMAKE_INSTALL_LIBDIR=lib
-DCMAKE_INSTALL_RPATH=${:location}/lib
......@@ -10,12 +10,13 @@ extends =
../pkgconfig/buildout.cfg
../zlib/buildout.cfg
../libexpat/buildout.cfg
../xz-utils/buildout.cfg
[graphviz]
recipe = slapos.recipe.cmmi
shared = true
url = https://ftp.osuosl.org/pub/blfs/conglomeration/graphviz/graphviz-2.40.1.tar.gz
md5sum = 4ea6fd64603536406166600bcc296fc8
url = https://gitlab.com/api/v4/projects/4207231/packages/generic/graphviz-releases/12.1.2/graphviz-12.1.2.tar.xz
md5sum = 54cf8e3b60bc137c72395d664fc6121a
pkg_config_depends = ${pango:location}/lib/pkgconfig:${pango:pkg_config_depends}
configure-options =
--with-included-ltdl
......@@ -58,7 +59,7 @@ configure-options =
--without-libgd
--without-glut
environment =
PATH=${pkgconfig:location}/bin:%(PATH)s
PATH=${pkgconfig:location}/bin:${xz-utils:location}/bin:%(PATH)s
PKG_CONFIG_PATH=${:pkg_config_depends}
CPPFLAGS=-I${zlib:location}/include -I${libexpat:location}/include
LDFLAGS=-L${bzip2:location}/lib -Wl,-rpath=${bzip2:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${libexpat:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${glib:location}/lib -Wl,-rpath=${pango:location}/lib
[buildout]
extends =
../cmake/buildout.cfg
parts =
openjpeg
[openjpeg]
recipe = slapos.recipe.cmmi
shared = true
version = 2.5.2
url = https://github.com/uclouvain/openjpeg/archive/refs/tags/v${:version}.tar.gz
md5sum = f9ee64845881a15109ed0aa73a12202f
location = @@LOCATION@@
configure-command = ${cmake:location}/bin/cmake
configure-options =
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_INSTALL_PREFIX=@@LOCATION@@
-DCMAKE_INSTALL_LIBDIR=lib
-DCMAKE_INSTALL_RPATH=${:location}/lib
......@@ -10,8 +10,8 @@ extends =
[openldap]
recipe = slapos.recipe.cmmi
shared = true
url = http://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-2.4.47.tgz
md5sum = e508f97bfd778fec7799f286e5c07176
url = https://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-2.6.8.tgz
md5sum = a7ca5f245340e478ea18b8f972c89bb1
pre-configure = cp -f ${gnu-config:location}/config.sub ${gnu-config:location}/config.guess build
configure-options =
--disable-static
......@@ -29,3 +29,8 @@ environment =
CPPFLAGS=-I${openssl:location}/include -I${cyrus-sasl:location}/include
LDFLAGS=-L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib -L${cyrus-sasl:location}/lib -Wl,-rpath=${cyrus-sasl:location}/lib
PATH=${groff:location}/bin:%(PATH)s
# old version for python-ldap-python
[openldap:python2]
url = http://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-2.4.47.tgz
md5sum = e508f97bfd778fec7799f286e5c07176
......@@ -15,12 +15,11 @@ max_version = 0
recipe = slapos.recipe.build:gitclone
git-executable = ${git:location}/bin/git
repository = https://lab.nexedi.com/nexedi/osie.git
revision = dd9aea8
revision = 7d6b1af
[osie-coupler]
recipe = slapos.recipe.cmmi
path = ${osie-repository:location}/coupler
bin_dir = ${:path}/bin/
environment =
PATH=${gcc:prefix}/bin:/usr/bin
C_INCLUDE_PATH=${open62541:location}/include:${open62541:location}/deps:${open62541:location}/src/pubsub
......
[buildout]
extends =
../cmake/buildout.cfg
../curl/buildout.cfg
../libtiff/buildout.cfg
../sqlite3/buildout.cfg
parts =
proj
[proj]
recipe = slapos.recipe.cmmi
shared = true
version = 9.4.0
url = https://download.osgeo.org/proj/proj-${:version}.tar.gz
md5sum = c33fd24cf4e3a3048c330b1b07e86b4f
configure-command = ${cmake:location}/bin/cmake
configure-options =
-DCMAKE_BUILD_TYPE=Release
-DEXE_SQLITE3=${sqlite3:location}/bin/sqlite3
-DSQLite3_INCLUDE_DIR=${sqlite3:location}/include
-DSQLite3_LIBRARY=${sqlite3:location}/lib/libsqlite3.so
-DCURL_INCLUDE_DIR=${curl:location}/include
-DCURL_LIBRARY=${curl:location}/lib/libcurl.so
-DTIFF_INCLUDE_DIR=${libtiff:location}/include
-DTIFF_LIBRARY_RELEASE=${libtiff:location}/lib/libtiff.so
-DCMAKE_INSTALL_PREFIX=@@LOCATION@@
-DCMAKE_INSTALL_LIBDIR=lib
-DCMAKE_INSTALL_RPATH=${curl:location}/lib:${libtiff:location}/lib:${sqlite3:location}/lib
[buildout]
parts =
proj4
[proj4]
recipe = slapos.recipe.cmmi
version = 4.8.0
url = http://download.osgeo.org/proj/proj-${:version}.tar.gz
md5sum = d815838c92a29179298c126effbb1537
configure-options =
--disable-dependency-tracking
......@@ -61,23 +61,29 @@ md5sum = dd94cab4541b57b88cf3dab32d6336e3
[python3.8]
<= python3-common
version = 3.8
package_version = 3.8.19
md5sum = 2532d25930266546822c144b99652254
package_version = 3.8.20
md5sum = 745478c81d6382cf46b5e7ad89e56008
[python3.9]
<= python3-common
version = 3.9
package_version = 3.9.19
md5sum = 87d0f8281237b972ff8b23e0e2c8d325
package_version = 3.9.20
md5sum = bdcda0fdb99e7e17018f6886fae5e1fd
[python3.10]
<= python3-common
version = 3.10
package_version = 3.10.14
md5sum = 05148354ce821ba7369e5b7958435400
package_version = 3.10.15
md5sum = 8b1faa1b193e4e90c0f17eb2decd89b5
[python3.11]
<= python3-common
version = 3.11
package_version = 3.11.9
md5sum = 22ea467e7d915477152e99d5da856ddc
package_version = 3.11.10
md5sum = af59e243df4c7019f941ae51891c10bc
[python3.12]
<= python3-common
version = 3.12
package_version = 3.12.7
md5sum = c6c933c1a0db52597cb45a7910490f93
......@@ -76,50 +76,3 @@ PKG_CONFIG_PATH-rbd =
[qemu:sys.version_info < (3,6)]
environment +=
PYTHON=${python3:executable}
[debian-netinst-base]
recipe = slapos.recipe.build:download
shared = true
filename = debian-${:version}-${:arch}-netinst.iso
url = https://cdimage.debian.org/cdimage/archive/${:archive}/${:arch}/iso-cd/${:filename}
archive = ${:version}
[debian-amd64-netinst-base]
<= debian-netinst-base
arch = amd64
[debian-amd64-jessie-netinst.iso]
<= debian-amd64-netinst-base
version = 8.11.1
md5sum = df0ce86d0b1d81e232ad08eef58754ed
[debian-amd64-stretch-netinst.iso]
<= debian-amd64-netinst-base
version = 9.13.0
md5sum = 6097fdb9cbab47c96471274b9044e983
[debian-amd64-buster-netinst.iso]
<= debian-amd64-netinst-base
version = 10.11.0
md5sum = 9d7b9cc850464d60ac174787c53e8f3f
[debian-amd64-bullseye-netinst.iso]
<= debian-amd64-netinst-base
version = 11.7.0
md5sum = b33775a9ab6eae784b6da9f31be48be3
[debian-amd64-bookworm-netinst.iso]
<= debian-amd64-netinst-base
version = 12.4.0
md5sum = a03cf771ba9513d908093101a094ac88
alternate-url = https://cdimage.debian.org/cdimage/release/current/${:arch}/iso-cd/${:filename}
[debian-amd64-netinst.iso]
<= debian-amd64-bookworm-netinst.iso
[debian-amd64-testing-netinst.iso]
<= debian-amd64-netinst-base
alternate-url = https://cdimage.debian.org/cdimage/${archive}/${:arch}/iso-cd/${:filename}
archive = bullseye_di_rc3
version = bullseye-DI-rc3
md5sum = 405917de7062c58357a3673c9901f0c4
......@@ -24,6 +24,6 @@ post-install =
# https://git.archlinux.org/svntogit/packages.git/tree/trunk/PKGBUILD?h=packages/sqlite
# NEO needs SQLITE_ENABLE_UPDATE_DELETE_LIMIT to drop partitions.
environment =
CPPFLAGS=-I${zlib:location}/include -DSQLITE_MAX_VARIABLE_NUMBER=250000
CPPFLAGS=-I${zlib:location}/include -DSQLITE_MAX_VARIABLE_NUMBER=250000 -DSQLITE_ENABLE_RTREE=1
LDFLAGS=-Wl,-rpath=@@LOCATION@@/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib
PATH=${tcl:location}/bin:${xz-utils:location}/bin:%(PATH)s
......@@ -3,7 +3,6 @@ extends =
../file/buildout.cfg
../openssh/buildout.cfg
../p7zip/buildout.cfg
../qemu-kvm/buildout.cfg
parts = vm-debian
......@@ -59,6 +58,54 @@ preseed.apt-setup/enable-source-repositories = false
preseed.recommends = false
preseed.tasks =
[debian-netinst-base]
recipe = slapos.recipe.build:download
shared = true
filename = debian-${:version}-${:arch}-netinst.iso
url = https://cdimage.debian.org/cdimage/archive/${:archive}/${:arch}/iso-cd/${:filename}
archive = ${:version}
[debian-amd64-netinst-base]
<= debian-netinst-base
arch = amd64
[debian-amd64-jessie-netinst.iso]
<= debian-amd64-netinst-base
version = 8.11.1
md5sum = df0ce86d0b1d81e232ad08eef58754ed
[debian-amd64-stretch-netinst.iso]
<= debian-amd64-netinst-base
version = 9.13.0
md5sum = 6097fdb9cbab47c96471274b9044e983
[debian-amd64-buster-netinst.iso]
<= debian-amd64-netinst-base
version = 10.11.0
md5sum = 9d7b9cc850464d60ac174787c53e8f3f
[debian-amd64-bullseye-netinst.iso]
<= debian-amd64-netinst-base
version = 11.7.0
md5sum = b33775a9ab6eae784b6da9f31be48be3
[debian-amd64-bookworm-netinst.iso]
<= debian-amd64-netinst-base
version = 12.4.0
md5sum = a03cf771ba9513d908093101a094ac88
alternate-url = https://cdimage.debian.org/cdimage/release/current/${:arch}/iso-cd/${:filename}
[debian-amd64-netinst.iso]
<= debian-amd64-bookworm-netinst.iso
[debian-amd64-testing-netinst.iso]
<= debian-amd64-netinst-base
alternate-url = https://cdimage.debian.org/cdimage/${archive}/${:arch}/iso-cd/${:filename}
archive = bullseye_di_rc3
version = bullseye-DI-rc3
md5sum = 405917de7062c58357a3673c9901f0c4
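Each of the [debian-*-netinst.iso] sections above pairs a download URL (templated from version, arch and filename) with the expected md5sum of the image. As a rough illustration of the check the download recipe performs, here is a minimal Python sketch; the local file path is a hypothetical placeholder, and the digest is the one declared for the bookworm image above:

import hashlib

def md5_of(path, chunk_size=1 << 20):
    # Compute the MD5 hex digest of a file, reading it in chunks.
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the bookworm netinst image declared above.
assert md5_of("debian-12.4.0-amd64-netinst.iso") == "a03cf771ba9513d908093101a094ac88"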
[debian-stable]
x86_64.iso = debian-amd64-netinst.iso
x86_64.kernel = install.amd/vmlinuz
......
......@@ -43,6 +43,8 @@ inline =
[versions]
freezegun = 1.5.1:whl
ZopeUndo = 6.0
[versions:python2]
freezegun = 0.3.15
ZopeUndo = 5.0
......@@ -2,5 +2,8 @@
[buildout]
extends =
test-zodb5.cfg
test-common.cfg
test-py2.cfg
[ZODB]
major = 5
# SlapOS software release to test zodbtools/ZODB5-py3 on Nexedi testing infrastructure.
# SlapOS software release to test zodbtools/ZODB6-py3 on Nexedi testing infrastructure.
[buildout]
extends = test-common.cfg
[ZODB]
major = 5
major = 6
[instance-profile]
filename = instance.cfg.in
md5sum = 4dc7ebc5f38baca776f520e7f5ccf9a7
md5sum = eafb0d0c2137516e884cde56b7016270
......@@ -29,9 +29,6 @@ key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
configuration.runtime_plc_url =
configuration.runtime_plc_md5sum =
configuration.autostart = 1
configuration.interface = 127.0.0.1
configuration.port = 8009
# Create all needed directories, depending on your needs
[directory]
......@@ -47,6 +44,10 @@ log = ${:var}/log
[beremiz-runtime]
logfile = ${directory:log}/beremiz-runtime.log
recipe = slapos.cookbook:wrapper
# default webport in Beremiz
webport = 8009
# adding BEREMIZPYTHONPATH is needed so we can override Beremiz's
# internal code, which tries to use sys.executable to spawn processes;
# in the context of SlapOS, sys.executable is a plain Python without the needed modules
......@@ -56,6 +57,7 @@ environment =
BEREMIZPYTHONPATH = {{ buildout['bin-directory'] }}/pythonwitheggs
PATH={{ gcc_location }}/bin
LIBRARY_PATH={{ openssl_location }}/lib
BEREMIZ_LOCAL_HOST=${instance-parameter:ipv4-random}
command-line =
{{ buildout['bin-directory'] }}/pythonwitheggs {{ buildout['directory'] }}/parts/beremiz-source/Beremiz_cli.py -k --project-home ${directory:home}/parts/download-plc/ build transfer run
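The BEREMIZPYTHONPATH comment above describes the problem being worked around: Beremiz spawns helper processes with sys.executable, which under SlapOS is a bare interpreter without the eggs installed by this profile. A hypothetical, minimal Python sketch of that pattern (illustrative only, not Beremiz's actual code): the parent copies BEREMIZPYTHONPATH into the child's PYTHONPATH so the re-spawned interpreter can import the needed modules.

import os
import subprocess
import sys

# Illustrative only: spawn sys.executable (a plain Python under SlapOS) and
# extend its PYTHONPATH from BEREMIZPYTHONPATH so it can import the eggs.
child_env = dict(os.environ)
child_env["PYTHONPATH"] = os.environ.get("BEREMIZPYTHONPATH", "")
subprocess.check_call(
    [sys.executable, "-c", "import sys; print(sys.path)"],
    env=child_env,
)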
......@@ -64,10 +66,9 @@ wrapper-path = ${directory:service}/beremiz-runtime
[http-promise]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/${:_buildout_section_name_}
hostname = ${instance-parameter:configuration.interface}
port = ${instance-parameter:configuration.port}
hostname = ${instance-parameter:ipv4-random}
port = ${beremiz-runtime:webport}
[publish-connection-parameter]
recipe = slapos.cookbook:publish
port = ${instance-parameter:configuration.port}
interface = ${instance-parameter:configuration.interface}
beremiz_runtime_url = http://${instance-parameter:ipv4-random}:${beremiz-runtime:webport}
......@@ -51,6 +51,8 @@ setup(name=name,
'cryptography',
'pexpect',
'pyOpenSSL',
'ZEO',
'zodburi',
],
test_suite='test',
)
import glob
import ipaddress
import json
import logging
......@@ -267,17 +266,13 @@ class TestLog(BalancerTestCase, CrontabMixin):
# crontab for apachedex is executed
self._executeCrontabAtDate('generate-apachedex-report', '23:59')
# it creates a report for the day
apachedex_report, = glob.glob(
os.path.join(
self.computer_partition_root_path,
'srv',
'monitor',
'private',
'apachedex',
'ApacheDex-*.html',
))
with open(apachedex_report) as f:
report_text = f.read()
apachedex_report, = (
self.computer_partition_root_path
/ 'srv'
/ 'monitor'
/ 'private'
/ 'apachedex').glob('ApacheDex-*.html')
report_text = apachedex_report.read_text()
self.assertIn('APacheDEX', report_text)
# having this table means that apachedex could parse some lines.
self.assertIn('<h2>Hits per status code</h2>', report_text)
......@@ -318,8 +313,8 @@ class TestLog(BalancerTestCase, CrontabMixin):
self.assertEqual(
requests.get(self.default_balancer_zope_url, verify=False).status_code,
requests.codes.service_unavailable)
with open(os.path.join(self.computer_partition_root_path, 'var', 'log', 'apache-error.log')) as error_log_file:
error_line = error_log_file.read().splitlines()[-1]
error_log_file = self.computer_partition_root_path / 'var' / 'log' / 'apache-error.log'
error_line = error_log_file.read_text().splitlines()[-1]
self.assertIn('backend default has no server available!', error_line)
# this log also include a timestamp
self.assertRegex(error_line, r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')
......@@ -416,7 +411,7 @@ class TestBalancer(BalancerTestCase):
# real time statistics can be obtained by using the stats socket and there
# is a wrapper which makes this a bit easier.
socat_process = subprocess.Popen(
[self.computer_partition_root_path + '/bin/haproxy-socat-stats'],
[self.computer_partition_root_path / 'bin' / 'haproxy-socat-stats'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
......@@ -604,14 +599,10 @@ class TestServerTLSEmbeddedCaucase(BalancerTestCase):
balancer_parsed_url.port)
# run caucase updater in the future, so that certificate is renewed
caucase_updater, = glob.glob(
os.path.join(
self.computer_partition_root_path,
'etc',
'service',
'caucase-updater-haproxy-certificate-*',
))
process = pexpect.spawnu("faketime +90days " + caucase_updater)
caucase_updater, = (
self.computer_partition_root_path / 'etc' / 'service'
).glob('caucase-updater-haproxy-certificate-*')
process = pexpect.spawnu(f"faketime +90days {caucase_updater}")
logger = self.logger
class DebugLogFile:
def write(self, msg):
......@@ -953,21 +944,16 @@ class TestClientTLS(BalancerTestCase):
# We have two services in charge of updating CRL and CA certificates for
# each frontend CA, plus the one for the balancer's own certificate
caucase_updater_list = glob.glob(
os.path.join(
self.computer_partition_root_path,
'etc',
'service',
'caucase-updater-*',
))
caucase_updater_list = list((
self.computer_partition_root_path / 'etc' / 'service'
).glob('caucase-updater-*'))
self.assertEqual(len(caucase_updater_list), 3)
# find the one corresponding to this caucase
for caucase_updater_candidate in caucase_updater_list:
with open(caucase_updater_candidate) as f:
if caucase.url in f.read():
caucase_updater = caucase_updater_candidate
break
if caucase.url in caucase_updater_candidate.read_text():
caucase_updater = caucase_updater_candidate
break
else:
self.fail("Could not find caucase updater script for %s" % caucase.url)
......
......@@ -477,8 +477,8 @@ class TestSeleniumTestRunner(ERP5InstanceTestCase, TestPublishedURLIsReachableMi
}
def test_test_runner_configuration_json_file(self):
runUnitTest_script, = glob.glob(
self.computer_partition_root_path + "/../*/bin/runUnitTest.real")
runUnitTest_script, = self.computer_partition_root_path.glob(
"../*/bin/runUnitTest.real")
config_file = None
with open(runUnitTest_script) as f:
for line in f:
......@@ -504,8 +504,8 @@ class TestDisableTestRunner(ERP5InstanceTestCase, TestPublishedURLIsReachableMix
"""
# self.computer_partition_root_path is the path of root partition.
# we want to assert that no scripts exist in any partition.
bin_programs = list(map(os.path.basename,
glob.glob(self.computer_partition_root_path + "/../*/bin/*")))
bin_programs = [
p.name for p in self.computer_partition_root_path.glob("../*/bin/*")]
self.assertTrue(bin_programs) # just to check the glob was correct.
self.assertNotIn('runUnitTest', bin_programs)
......
import contextlib
import subprocess
import json
import zodburi
from ZODB.DB import DB
from slapos.testing.utils import CrontabMixin
from . import ERP5InstanceTestCase, default, matrix, setUpModule, ERP5PY3
_ = setUpModule
class ZEOTestCase(ERP5InstanceTestCase):
__test_matrix__ = matrix((default,))
@classmethod
def getInstanceSoftwareType(cls) -> str:
return "zodb-zeo"
@classmethod
def _getInstanceParameterDict(cls) -> dict:
return {
"tcpv4-port": 8000,
"computer-memory-percent-threshold": 100,
"name": cls.__name__,
"monitor-passwd": "secret",
"zodb-dict": {"root": {}},
}
@classmethod
def getInstanceParameterDict(cls) -> dict:
return {"_": json.dumps(cls._getInstanceParameterDict())}
def setUp(self) -> None:
self.storage_dict = json.loads(
self.computer_partition.getConnectionParameterDict()["_"]
)["storage-dict"]
def db(self) -> contextlib.AbstractContextManager[DB]:
root = self.storage_dict["root"]
zeo_uri = f"zeo://{root['server']}?storage={root['storage']}"
storage_factory, dbkw = zodburi.resolve_uri(zeo_uri)
return contextlib.closing(DB(storage_factory(), **dbkw))
class TestRepozo(ZEOTestCase, CrontabMixin):
__partition_reference__ = "rpz"
def test_backup_and_restore(self) -> None:
def check_state():
(self.computer_partition_root_path / ".timestamp").unlink()
self.waitForInstance()
if ERP5PY3:
with self.db() as db:
with db.transaction() as cnx:
self.assertEqual(cnx.root.state, "before backup")
if ERP5PY3:
# as it is not possible to connect to a python2 ZEO server
# from a python3 client, we check more when the server is python3
with self.db() as db:
with db.transaction() as cnx:
cnx.root.state = "before backup"
check_state()
self._executeCrontabAtDate("tidstorage", "2000-01-01 UTC")
dat, fsz, index = sorted(
[
p.name
for p in (
self.computer_partition_root_path / "srv" / "backup" / "zodb" / "root"
).glob("*")
]
)
self.assertRegex(dat, r'2000-01-01-00-\d\d-\d\d.dat')
self.assertRegex(fsz, r'2000-01-01-00-\d\d-\d\d.fsz')
self.assertRegex(index, r'2000-01-01-00-\d\d-\d\d.index')
if ERP5PY3:
with self.db() as db:
with db.transaction() as cnx:
cnx.root.state = "after backup"
db.close()
restore_script = self.computer_partition_root_path / "srv" / "runner-import-restore"
self.assertTrue(restore_script.exists())
status, restore_output = subprocess.getstatusoutput(str(restore_script))
self.assertEqual(status, 1)
self.assertIn("Zeo is already running", restore_output)
with self.slap.instance_supervisor_rpc as supervisor:
supervisor.stopAllProcesses()
restore_output = subprocess.check_output(restore_script)
check_state()
......@@ -15,28 +15,27 @@
[instance-profile]
filename = instance.cfg.in
md5sum = 8c9dc41c176ba01116de5b71aaa704de
md5sum = 32c772c593d2c3c38c26186b91b78cf8
[instance-default]
filename = instance-default.cfg.in
md5sum = b4330fbe0c9c3631f4f477c06d3460b3
[instance-agent]
filename = instance-agent.cfg.in
md5sum = 6bbc97cf8e752d22773d5f23ecdda37d
[influxdb-config-file]
filename = influxdb-config-file.cfg.in
md5sum = a28972ced3e0f4aa776e43a9c44717c0
[telegraf-config-file]
filename = telegraf-config-file.cfg.in
md5sum = a1a9c22c2a7829c66a49fc2504604d21
[grafana-config-file]
filename = grafana-config-file.cfg.in
md5sum = e255dcca466f5de51698d24cbd114577
md5sum = 2b75d6b1984d9d154303ec773aa88474
[grafana-provisioning-config-file]
filename = grafana-provisioning-config-file.cfg.in
md5sum = 3aa0f1ed752b2a59ea2b5e7c1733daf3
[grafana-provisioning-dashboards-config-file]
filename = grafana-provisioning-dashboards-config-file.cfg.in
md5sum = 5616679a9c5c2757540175ead3f5500a
[loki-config-file]
filename = loki-config-file.cfg.in
md5sum = ad2baf4599a937d7352034a41fa24814
[promtail-config-file]
filename = promtail-config-file.cfg.in
md5sum = 5f1b3a1a3d3f98daeab4780106452d71
......@@ -154,7 +154,7 @@ reporting_enabled = true
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to https://grafana.com to get latest versions
check_for_updates = true
check_for_updates = false
# Google Analytics universal tracking code, only enabled if you specify an id here
google_analytics_ua_id =
......@@ -334,23 +334,21 @@ allow_sign_up = true
#################################### SMTP / Emailing #####################
[smtp]
{% set email = slapparameter_dict.get('email', {}) %}
#enabled = false
enabled = {{ slapparameter_dict.get('smtp-server') and 'true' or 'false' }}
enabled = {{ email.get('smtp-server') and 'true' or 'false' }}
#host = localhost:25
host = {{ slapparameter_dict.get('smtp-server', '') }}
host = {{ email.get('smtp-server', '') }}
#user =
user = {{ slapparameter_dict.get('smtp-username', '') }}
user = {{ email.get('smtp-username', '') }}
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
#password =
password = {{ slapparameter_dict.get('smtp-password', '') and '"""%s"""' % slapparameter_dict['smtp-password'] or ""}}
password = {{ email.get('smtp-password', '') and '"""%s"""' % email['smtp-password'] or ""}}
cert_file =
key_file =
#skip_verify = false
skip_verify = {{ slapparameter_dict.get('smtp-verify-ssl', 'true').lower() == 'true' and 'false' or 'true' }}
#from_address = admin@grafana.localhost
from_address = {{ slapparameter_dict.get('email-from-address', '') }}
#from_name = Grafana
from_name = {{ slapparameter_dict.get('email-from-name', 'Grafana') }}
skip_verify = {{ email.get('smtp-verify-ssl') and 'false' or 'true' }}
from_address = {{ email.get('email-from-address', '') }}
from_name = {{ email.get('email-from-name', 'Grafana') }}
ehlo_identity =
[emails]
......
# https://grafana.com/docs/administration/provisioning/#example-datasource-config-file
apiVersion: 1
datasources:
- name: telegraf
type: influxdb
access: proxy
url: {{ influxdb['url'] }}
user: {{ influxdb['auth-username'] }}
database: telegraf
isDefault: true
jsonData:
tlsSkipVerify: true
secureJsonData:
password: {{ influxdb['auth-password'] }}
version: 1
editable: false
- name: loki
type: loki
access: proxy
url: {{ loki['url'] }}
version: 1
editable: false
# https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
apiVersion: 1
providers:
- name: SlapOS
folder: ''
updateIntervalSeconds: 10
allowUiUpdates: false
options:
path: {{ dashboards_dir }}
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"description": "Parameters to instantiate an agent collecting logs and metrics",
"type": "object",
"additionalProperties": false,
"unevaluatedProperties": false,
"$defs": {
"type": {
"description": "Type of the application. With `SlapOS` type, some metrics are collected from supervisor and from some known partition types (for example: ERP5's mariadb or ERP5's zopes). With `system` type, only log files are ingested.",
"type": "string",
"default": "SlapOS",
"enum": [
"SlapOS",
"system"
]
},
"name": {
"description": "Name of this application",
"type": "string"
},
"urls": {
"description": "URLs to monitor for availability and certificate lifetime",
"type": "array",
"items": {
"type": "string"
}
},
"log-file-patterns": {
"type": "array",
"items": {
"type": "string"
},
"description": "Glob patterns for watched log files."
},
"static-tags": {
"type": "object",
"description": "Static tags for this partition",
"examples": [
{
"service-level": "production",
"data-center": "abc123"
}
]
}
},
"required": [
"applications",
"influxdb",
"loki"
],
"properties": {
"applications": {
"description": "Applications to monitor",
"type": "array",
"items": {
"oneOf": [
{
"type": "object",
"additionalProperties": false,
"description": "Configuration for SlapOS type application",
"required": [
"type",
"name",
"instance-root",
"partitions"
],
"properties": {
"type": {
"$ref": "#/$defs/type",
"const": "SlapOS"
},
"name": {
"$ref": "#/$defs/name"
},
"urls": {
"$ref": "#/$defs/urls"
},
"instance-root": {
"description": "Directory containing SlapOS partitions.",
"type": "string",
"examples": [
"/srv/slapgrid/",
"/srv/slapgrid/slappart30/srv/runner/instance/"
]
},
"partitions": {
"description": "SlapOS partitions to monitor",
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"reference"
],
"unevaluatedProperties": false,
"properties": {
"name": {
"type": "string",
"description": "Friendly name of the partition",
"examples": [
"mariadb",
"zope-activity"
]
},
"reference": {
"type": "string",
"description": "Reference of the partition",
"examples": [
"slappart1",
"slappart2"
]
},
"type": {
"type": "string",
"description": "Type of the partition. Known types have metrics and logs collected",
"enum": [
"erp5/mariadb",
"erp5/balancer",
"erp5/zope-activity",
"erp5/zope-front",
"erp5/zeo",
"mariadb",
"default"
],
"default": "default"
},
"log-file-patterns": {
"$ref": "#/$defs/log-file-patterns",
"description": "Glob pattern for log files to watch. This mostly makes sense for `default` partition type. `{partition_root_directory}` python `.format`-style substitution variable is supported."
},
"static-tags": {
"$ref": "#/$defs/static-tags"
}
},
"allOf": [
{
"if": {
"properties": {
"type": {
"enum": [
"mariadb",
"erp5/mariadb"
]
}
}
},
"then": {
"properties": {
"dbname": {
"type": "string",
"description": "Database name"
},
"username": {
"type": "string",
"description": "Username to connect to database"
}
}
}
}
],
"examples": [
{
"name": "zope-backoffice",
"type": "erp5/zope-front",
"reference": "slappart1",
"static-tags": {
"instance": "instance-name"
}
},
{
"name": "mariadb",
"type": "erp5/mariadb",
"reference": "slappart2"
},
{
"name": "Theia",
"type": "default",
"log-file-patterns": [
"{partition_root_directory}/.slappart*log"
]
}
]
}
}
}
},
{
"type": "object",
"additionalProperties": false,
"description": "Configuration for `system` type application",
"required": [
"type",
"name"
],
"properties": {
"type": {
"$ref": "#/$defs/type",
"const": "system"
},
"name": {
"$ref": "#/$defs/name"
},
"urls": {
"$ref": "#/$defs/urls"
},
"partitions": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string",
"description": "Friendly name of the partition",
"examples": [
"syslog",
"email"
]
},
"log-file-patterns": {
"$ref": "#/$defs/log-file-patterns"
},
"static-tags": {
"$ref": "#/$defs/static-tags"
}
},
"examples": [
{
"name": "syslog",
"log-file-patterns": [
"/var/log/syslog"
]
},
{
"name": "kernel",
"log-file-patterns": [
"/var/log/kern.log",
"/var/log/messages"
]
},
{
"name": "re6stnet",
"log-file-patterns": [
"/var/log/re6stnet/*.log"
]
}
]
}
}
}
}
]
}
},
"influxdb": {
"description": "Connection information for influxdb",
"type": "object",
"additionalProperties": false,
"required": [
"url",
"database",
"username",
"password"
],
"properties": {
"url": {
"description": "IPv6 URL of influxdb HTTP endpoint",
"format": "uri",
"type": "string"
},
"database": {
"description": "database created in influxdb",
"type": "string"
},
"username": {
"description": "username for influxdb",
"type": "string"
},
"password": {
"description": "password for influxdb user",
"type": "string"
}
}
},
"loki": {
"description": "Connection information for loki",
"type": "object",
"additionalProperties": false,
"required": [
"url",
"caucase-url"
],
"properties": {
"url": {
"description": "Base URL of Loki",
"format": "uri",
"type": "string"
},
"caucase-url": {
"description": "URL caucase service used by Loki",
"format": "uri",
"type": "string"
}
}
}
}
}
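To make the shape of the agent parameters concrete, here is a hedged Python sketch of a parameter dictionary that should validate against the schema above; every URL, reference and path is an illustrative placeholder. Since the software release uses json-in-xml serialisation, the dictionary is passed as a single JSON string under the "_" key, the same pattern used by the ZEO test case earlier in this changeset.

import json

agent_parameters = {
    "applications": [
        {
            "type": "SlapOS",
            "name": "ERP5",
            "instance-root": "/srv/slapgrid/slappart30/srv/runner/instance/",
            "partitions": [
                {"name": "mariadb", "reference": "slappart2", "type": "erp5/mariadb"},
                {"name": "zope-backoffice", "reference": "slappart1",
                 "type": "erp5/zope-front",
                 "static-tags": {"instance": "instance-name"}},
            ],
        },
        {
            "type": "system",
            "name": "system",
            "partitions": [
                {"name": "syslog", "log-file-patterns": ["/var/log/syslog"]},
            ],
        },
    ],
    "influxdb": {
        "url": "https://[2001:db8::1]:8086",
        "database": "telegraf",
        "username": "influxdb",
        "password": "secret",
    },
    "loki": {
        "url": "https://[2001:db8::2]:3100",
        "caucase-url": "https://[2001:db8::2]:9400",
    },
}

request_parameters = {"_": json.dumps(agent_parameters)}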
{
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Values returned by agent instantiation",
"additionalProperties": false,
"properties": {
"telegraf-extra-config-dir": {
"description": "Directory in telegraf partition where extra configuration file will be loaded. These files must match *.conf pattern",
"type": "string"
},
"promtail-url": {
"description": "URL of embedded server from promtail",
"format": "uri",
"type": "string"
},
"facl-script": {
"description": "Path of a generated script to set ACL for the agent to access files and sockets. This might be needed depending on how slapos partitions were formatted",
"type": "string"
}
},
"type": "object"
}
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"description": "Parameters to instantiate Grafana",
"type": "object",
"additionalProperties": false,
"properties": {
"email": {
"type": "object",
"description": "Email configuration",
"additionalProperties": false,
"properties": {
"smtp-server": {
"description": "SMTP server used by Grafana to send emails (in host:port format). Leaving this empty will disable email sending.",
"type": "string"
},
"smtp-username": {
"description": "Username to connect to SMTP server",
"type": "string"
},
"smtp-password": {
"description": "Password to connect to SMTP server",
"type": "string"
},
"smtp-verify-ssl": {
"description": "Verify certificate of SMTP server",
"type": "boolean",
"default": true
},
"email-from-address": {
"description": "Email address used in `From:` header of emails",
"type": "string"
},
"email-from-name": {
"description": "Name used in `From:` header of emails",
"default": "Grafana",
"type": "string"
}
}
},
"frontend": {
"type": "object",
"additionalProperties": false,
"properties": {
"custom-domain": {
"description": "Custom domain to use when requesting a rapid-cdn frontend",
"type": "string",
"format": "hostname"
}
}
},
"caucase": {
"type": "object",
"description": "Caucase configuration. To connect external agents, it's required to approve their client certificates, either using an external caucase referenced as `external-caucase-url` or registering a user with `user-auto-approve-count`",
"additionalProperties": false,
"properties": {
"external-caucase-url": {
"description": "URL of a caucase instance to manage all server and clients certificates, to use instead of embedding caucase",
"type": "string",
"format": "uri"
},
"user-auto-approve-count": {
"description": "Number of users to automatically approve in the embedded caucase",
"type": "integer",
"default": 0
}
}
},
"influxdb": {
"description": "Fine tuning influxdb parameters",
"type": "object",
"additionalProperties": false,
"properties": {
"default-retention-policy-days": {
"description": "Number of days to keep metrics data",
"default": 720,
"type": "integer"
}
}
},
"loki": {
"description": "Fine tuning loki parameters",
"type": "object",
"additionalProperties": false,
"properties": {
"retention-period-days": {
"description": "Number of days to keep log data",
"default": 60,
"type": "integer"
}
}
},
"agent": {
"type": "object",
"properties": {
"applications": {
"$ref": "./instance-agent-input-schema.json#properties/applications"
}
}
}
}
}
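For reference, a minimal hedged example of request parameters matching this schema, enabling email sending through the email object consumed by the grafana-config-file template above; all values are placeholders, and the dictionary would again be wrapped as {"_": json.dumps(...)} for the json-in-xml serialisation.

grafana_parameters = {
    "email": {
        "smtp-server": "smtp.example.com:587",
        "smtp-username": "grafana",
        "smtp-password": "secret",
        "smtp-verify-ssl": True,
        "email-from-address": "grafana@example.com",
    },
    "caucase": {"user-auto-approve-count": 1},
    "loki": {"retention-period-days": 60},
}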
{
"$schema": "http://json-schema.org/draft-04/schema#",
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Values returned by Grafana instantiation",
"additionalProperties": false,
"properties": {
"url": {
"description": "Shared frontend for this Grafana instance",
"pattern": "^https://",
"format": "uri",
"type": "string"
},
"grafana-username": {
......@@ -18,12 +17,12 @@
},
"grafana-url": {
"description": "IPv6 URL to access grafana",
"pattern": "^https://",
"format": "uri",
"type": "string"
},
"influxdb-url": {
"description": "IPv6 URL of influxdb HTTP endpoint",
"pattern": "^https://",
"format": "uri",
"type": "string"
},
"influxdb-database": {
......@@ -38,8 +37,23 @@
"description": "password for influxdb user",
"type": "string"
},
"telegraf-extra-config-dir": {
"description": "Directory in telegraf partition where extra configuration file will be loaded. These files must match *.conf pattern",
"loki-url": {
"description": "Base URL of Loki",
"format": "uri",
"type": "string"
},
"loki-caucase-url": {
"description": "URL caucase service used by Loki",
"format": "uri",
"type": "string"
},
"agent-promtail-url": {
"description": "URL of embedded server from promtail",
"format": "uri",
"type": "string"
},
"agent-facl-script": {
"description": "Path of a generated script to set ACL for the agent to access files and sockets. This might be needed depending on how slapos partitions were formatted",
"type": "string"
}
},
......
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters to instantiate Grafana",
"type": "object",
"additionalProperties": false,
"properties": {
"smtp-server": {
"description": "SMTP server used by grafana to send emails (in host:port format). Leaving this empty will disable email sending.",
"type": "string"
},
"smtp-username": {
"description": "Username to connect to SMTP server",
"type": "string"
},
"smtp-password": {
"description": "Password to connect to SMTP server",
"type": "string"
},
"smtp-verify-ssl": {
"description": "Verify SSL certificate of SMTP server",
"type": "string",
"enum": [
"true",
"false"
]
},
"email-from-address": {
"description": "Email address used in From: header of emails",
"type": "string"
},
"email-from-name": {
"description": "Name used in From: header of emails",
"default": "Grafana",
"type": "string"
},
"promtail-extra-scrape-config": {
"description": "Raw promtail config (experimental parameter, see https://github.com/grafana/loki/blob/v0.3.0/docs/promtail.md#scrape-configs for detail)",
"default": "",
"type": "string"
}
}
}
auth_enabled: false
server:
http_listen_address: {{ loki['ip'] }}
http_listen_port: {{ loki['port'] }}
grpc_listen_address: {{ loki['ip'] }}
grpc_listen_port: {{ loki['grpc-port'] }}
ingester:
lifecycler:
address: {{ loki['ip'] }}
ring:
kvstore:
store: inmemory
replication_factor: 1
chunk_idle_period: 15m
schema_config:
configs:
- from: 2018-04-15
store: boltdb
object_store: filesystem
schema: v9
index:
prefix: index_
period: 168h
storage_config:
boltdb:
directory: {{ loki['storage-boltdb-dir'] }}
filesystem:
directory: {{ loki['storage-filesystem-dir'] }}
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0
table_manager:
chunk_tables_provisioning:
inactive_read_throughput: 0
inactive_write_throughput: 0
provisioned_read_throughput: 0
provisioned_write_throughput: 0
index_tables_provisioning:
inactive_read_throughput: 0
inactive_write_throughput: 0
provisioned_read_throughput: 0
provisioned_write_throughput: 0
retention_deletes_enabled: false
retention_period: 0
server:
http_listen_address: {{ promtail['ip'] }}
http_listen_port: {{ promtail['http-port'] }}
grpc_listen_address: {{ promtail['ip'] }}
grpc_listen_port: {{ promtail['grpc-port'] }}
external_url: {{ promtail['url'] }}
positions:
filename: {{ promtail['dir'] }}/positions.yaml
clients:
- url: {{ loki['url'] }}/api/prom/push
scrape_configs:
- job_name: test
static_configs:
- targets:
- localhost
labels:
job: grafanalogs
__path__: ./var/log/*log
{{ slapparameter_dict.get('promtail-extra-scrape-config', '') }}
[buildout]
extends =
../../stack/slapos.cfg
../../stack/caucase/buildout.cfg
../../stack/nodejs.cfg
../../component/make/buildout.cfg
../../component/golang/buildout.cfg
../../component/openssl/buildout.cfg
../../component/curl/buildout.cfg
../../component/dash/buildout.cfg
../../component/jq/buildout.cfg
../../component/systemd/buildout.cfg
../../component/fluent-bit/buildout.cfg
buildout.hash.cfg
versions = versions
parts =
slapos-cookbook
instance-profile
gowork
influxdb-config-file
telegraf-config-file
grafana-config-file
grafana-provisioning-config-file
loki-config-file
promtail-config-file
grafana-provisioning-dashboards-config-file
fluent-bit
post-install-cleanup
[nodejs]
<= nodejs-14.16.0
[go_github.com_grafana_grafana]
<= go-git-package
go.importpath = github.com/grafana/grafana
repository = https://github.com/grafana/grafana
revision = v7.5.2-0-gca413c612f
revision = v10.1.2-0-g8e428858dd
[go_github.com_grafana_loki]
<= go-git-package
go.importpath = github.com/grafana/loki
repository = https://github.com/perrinjerome/loki
revision = v2.2.1-1-gda6d45f2
repository = https://github.com/grafana/loki
revision = v3.1.0-0-g935aee77e
[go_github.com_influxdata_influxdb]
<= go-git-package
......@@ -46,49 +46,80 @@ revision = v1.8.4-0-gbc8ec4384e
<= go-git-package
go.importpath = github.com/influxdata/telegraf
repository = https://github.com/influxdata/telegraf
revision = v1.17.3-0-g24a552b90b
revision = v1.28.1-0-g3ea9ffbe2
[go_github.com_perrinjerome_slapos_telegraf_input]
<= go-git-package
go.importpath = github.com/perrinjerome/telegraf-input-slapos
repository = https://github.com/perrinjerome/telegraf-input-slapos
revision = v0.0.2-0-gd4c5221
[go_github.com_prometheus_prometheus]
<= go-git-package
go.importpath = github.com/prometheus/prometheus
repository = https://github.com/prometheus/prometheus
revision = v0.41.0-0-gc0d8a56c6
# [go_github.com_jaegertracking_jaeger]
# <= go-git-package
# go.importpath = github.com/jaegertracking/jaeger
# repository = https://github.com/jaegertracking/jaeger
# revision = v1.20.0-623-gcac21f82
[gowork]
# Fails with current default golang1.18
golang = ${golang1.17:location}
install =
${go_github.com_grafana_loki:location}:./cmd/loki
${go_github.com_grafana_loki:location}:./cmd/promtail
${go_github.com_grafana_loki:location}:./clients/cmd/promtail
${go_github.com_grafana_loki:location}:./cmd/logcli
${go_github.com_influxdata_telegraf:location}:./cmd/...
${go_github.com_influxdata_influxdb:location}:./cmd/...
${go_github.com_perrinjerome_slapos_telegraf_input:location}:./...
${go_github.com_prometheus_prometheus:location}:./cmd/...
# disable cgo, to prevent loki/promtail from using go-systemd
environment =
CGO_ENABLED = 0
CGO_ENABLED=1
CGO_CFLAGS=-I${systemd:location}/include
buildflags =
-tags promtail_journal_enabled
cpkgpath =
${systemd:location}
telegraf-bin = ${:bin}/telegraf
telegraf-input-slapos-bin = ${:bin}/telegraf-input-slapos
influx-bin = ${:bin}/influx
influxd-bin = ${:bin}/influxd
grafana-bin = ${:bin}/grafana-server
grafana-bin = ${grafana:binpath}/grafana
grafana-homepath = ${grafana:homepath}
loki-bin = ${:bin}/loki
promtail-bin = ${:bin}/promtail
[post-install-cleanup]
recipe = plone.recipe.command
stop-on-error = true
# remove caches and binary files confusing software check
command =
chmod +w ${gowork.dir:directory}/pkg/mod/github.com/gabriel-vasile/mimetype@v1.4.2/testdata/ \
&& rm -rf ${gowork.dir:directory}/pkg/mod/github.com/gabriel-vasile/mimetype@v1.4.2/testdata/so.so \
&& chmod -w ${gowork.dir:directory}/pkg/mod/github.com/gabriel-vasile/mimetype@v1.4.2/testdata/ \
&& rm -rf ${buildout:directory}/.cache/
[grafana]
recipe = plone.recipe.command
command = bash -c "
cd ${:homepath} &&
. ${gowork:env.sh} &&
# Unlike the loki, grafana _needs_ CGO_ENABLED, so we override here
export CGO_ENABLED=1 &&
command = bash -ce "
cd ${:homepath} && \
. ${gowork:env.sh} && \
go install github.com/google/wire/cmd/wire@v0.5.0 && \
wire gen -tags oss ./pkg/server ./pkg/cmd/grafana-cli/runner && \
go run build.go setup && \
go run build.go build && \
${yarn:location}/bin/yarn install --pure-lockfile && \
export NODE_OPTIONS=--max_old_space_size=8192 && \
${yarn:location}/bin/yarn install --immutable && \
${yarn:location}/bin/yarn run themes:generate && \
${yarn:location}/bin/yarn run build && \
${yarn:location}/bin/yarn run plugins:build-bundled && \
# Cleanup yarn and Cypress caches
rm -rf ${buildout:directory}/.cache/Cypress/ && \
rm -rf ${buildout:directory}/.cache/yarn/
"
${yarn:location}/bin/yarn run plugins:build-bundled"
homepath = ${go_github.com_grafana_grafana:location}
# XXX "linux-amd64" is not portable here
binpath = ${go_github.com_grafana_grafana:location}/bin/linux-amd64
stop-on-error = true
[download-file-base]
......@@ -98,19 +129,22 @@ url = ${:_profile_base_location_}/${:filename}
[influxdb-config-file]
<= download-file-base
[telegraf-config-file]
<= download-file-base
[grafana-config-file]
<= download-file-base
[grafana-provisioning-config-file]
[grafana-provisioning-dashboards-config-file]
<= download-file-base
[loki-config-file]
[instance-eggs]
recipe = zc.recipe.egg
eggs =
toml
[instance-agent]
<= download-file-base
[promtail-config-file]
[instance-default]
<= download-file-base
[instance-profile]
......@@ -120,8 +154,11 @@ output = ${buildout:directory}/instance.cfg
extensions = jinja2.ext.do
context =
section buildout buildout
key instance_default instance-default:target
key instance_agent instance-agent:target
key openssl_bin openssl-output:openssl
key telegraf_bin gowork:telegraf-bin
key telegraf_input_slapos_bin gowork:telegraf-input-slapos-bin
key influxd_bin gowork:influxd-bin
key influx_bin gowork:influx-bin
key grafana_bin gowork:grafana-bin
......@@ -130,8 +167,13 @@ context =
key promtail_bin gowork:promtail-bin
key curl_bin :curl-bin
key dash_bin :dash-bin
key jq_bin :jq-bin
key caucase_jinja2_library caucase-jinja2-library:target
curl-bin = ${curl:location}/bin/curl
dash-bin = ${dash:location}/bin/dash
jq-bin = ${jq:location}/bin/jq
depends = ${instance-eggs:eggs} ${caucase-eggs:eggs}
[versions]
inotifyx = 0.2.2
toml = 0.10.2
{
"name": "Grafana",
"description": "Grafana, Telegraf and Influxdb",
"serialisation": "xml",
"description": "Grafana, Influxdb, Loki and Telegraf",
"serialisation": "json-in-xml",
"software-type": {
"default": {
"title": "Default",
"description": "Grafana, Telegraf and Influxdb in same partition",
"request": "instance-input-schema.json",
"response": "instance-output-schema.json",
"description": "Grafana, Influxdb and Loki",
"request": "instance-default-input-schema.json",
"response": "instance-default-output-schema.json",
"index": 0
},
"agent": {
"title": "Agent",
"description": "Telegraf agent sending metrics to Influxdb and Promtail agent sending logs to Loki",
"request": "instance-agent-input-schema.json",
"response": "instance-agent-output-schema.json",
"index": 0
}
}
......
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
# Jitter the flush interval by a random amount. This is primarily to avoid
# large write spikes for users running a large number of telegraf instances.
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
# Run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
[outputs]
# Configuration for influxdb server to send metrics to
[outputs.influxdb]
# The full HTTP or UDP endpoint URL for your InfluxDB instance
# Multiple urls can be specified for InfluxDB cluster support.
# urls = ["udp://localhost:8089"] # UDP endpoint example
# XXX XXX XXX
#urls = ["http://localhost:8086"] # required
urls = ["{{ influxdb['url'] }}"]
insecure_skip_verify = true # because we are using a self signed certificate
# The target database for metrics (telegraf will create it if not exists)
database = "{{ influxdb['database'] }}" # required
# Precision of writes, valid values are n, u, ms, s, m, and h
# note: using second precision greatly helps InfluxDB compression
precision = "s"
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
username = "{{ influxdb['auth-username'] }}"
password = "{{ influxdb['auth-password'] }}"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# PLUGINS #
###############################################################################
# Read metrics about cpu usage
[cpu]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time"]
# Read metrics about memory usage
[mem]
# no configuration
[disk]
[io]
[system]
###############################################################################
# ERP5 - PLUGINS #
###############################################################################
#
# Left here as example, don't edit this file directly, but place your config
# files in {{ telegraf['extra-config-dir'] }}
#
#[mysql]
# servers = ["root@unix(/srv/slapgrid/slappart12/srv/runner/instance/slappart1/var/run/mariadb.sock)/erp5"]
#[memcached]
# # XXX kumofs does not support memcached's stat command
# servers = ["10.0.248.233:2013", "10.0.248.233:2003"]
#[haproxy]
# servers = ["http://10.0.121.162:2150/haproxy", "http://10.0.121.162:2152/haproxy"]
#[[inputs.exec]]
# commands = ["/srv/slapgrid/slappart0/bin/slapsensor /srv/slapgrid/slappart0/srv/runner/instance/etc/supervisord.conf"]
# name_suffix = "_slapos"
# interval = "5s"
###############################################################################
# SERVICE PLUGINS #
###############################################################################
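As the header of this template notes, telegraf can be run with '-config <file> -test' to gather metrics once and print them without sending anything to InfluxDB, which is a convenient sanity check for a rendered configuration. A small hedged Python sketch of such a check; the binary and configuration paths are placeholders for whatever the instantiated partition provides.

import subprocess

def check_telegraf_config(telegraf_bin, config_path):
    # Gather metrics once and print them instead of sending them;
    # a non-zero exit status means the configuration is broken.
    return subprocess.run(
        [telegraf_bin, "-config", config_path, "-test"],
        capture_output=True, text=True, check=True,
    ).stdout

# Placeholder paths.
print(check_telegraf_config("bin/telegraf", "etc/telegraf/telegraf.conf"))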
......@@ -4,7 +4,7 @@ kvm
Introduction
------------
This software release is used to deploy KVM and NBD instances.
This software release is used to deploy KVM.
For extensive parameters definition, please look at parameter-input-schema.json.
......@@ -24,7 +24,6 @@ to be accessible from IPv4::
software_release=kvm,
partition_reference="My awesome KVM",
partition_parameter_kw={
"nbd-host":"ubuntu-1204.nbd.vifib.net",
}
)
......@@ -38,10 +37,6 @@ KVM instance parameters:
- frontend-instance-guid
- frontend-addtional-instance-guid
- frontend-instance-name (default: VNC Frontend)
- nbd-port (default: 1024)
- nbd-host
- nbd2-port (default: 1024)
- nbd2-host
- ram-size (default: 4096)
- disk-size = (default: 40)
......
......@@ -15,15 +15,15 @@
[template]
filename = instance.cfg.in
md5sum = ee1fe10d8db4d3c39e3a3f1b53d12883
md5sum = 9ae66fb63a3cdd8072582622aa1bb36c
[template-kvm]
filename = instance-kvm.cfg.jinja2
md5sum = 9916c160b1c9711145d7e10506a9fca8
md5sum = bd3a7229e4fdfa9372ee61b6054acf78
[template-kvm-cluster]
filename = instance-kvm-cluster.cfg.jinja2.in
md5sum = 6e6f6748ec466eb49a4f872aec7563fa
md5sum = 8ce14c5ae114dcfa6e9aff0511b218d4
[template-kvm-resilient]
filename = instance-kvm-resilient.cfg.jinja2
......@@ -45,10 +45,6 @@ md5sum = 34d1b7cc8ca62bfdfce759a1dfbbaccd
filename = template/kvm-export.sh.jinja2
md5sum = 64aa1ce8785f6b94aabd787fa3443082
[template-nbd]
filename = instance-nbd.cfg.jinja2
md5sum = e041e8011ad2ec7f104be173ef76f5e9
[template-nginx]
filename = template/nginx_conf.in
md5sum = 9ca886120a99befe25ca761ddc54753c
......@@ -59,7 +55,7 @@ md5sum = 6328f99728284847b8dd1146aadeae1b
[template-kvm-run]
filename = template/template-kvm-run.in
md5sum = f0190843e3979742fe9e29b8a607539f
md5sum = 729bc484c8c1a82b827cc4bcdff87f95
[template-kvm-controller]
filename = template/kvm-controller-run.in
......@@ -96,3 +92,7 @@ md5sum = b4f6ffef08685bace1b9c01a3bd2620d
[whitelist-domains-default]
filename = template/whitelist-domains-default
md5sum = e9d40162ba77472775256637a2617d14
[boot-image-select-source-config]
filename = template/boot-image-select-source-config.json.in
md5sum = 5dc0cbb8f8dccfdd5c52d0af4a2b2c48
......@@ -354,20 +354,6 @@
"vmxnet3"
]
},
"nbd-host": {
"title": "NBD hostname or IP",
"description": "hostname (or IP) of the NBD server containing the boot image.",
"type": "string",
"format": "internet-address"
},
"nbd-port": {
"title": "NBD port",
"description": "Port of the NBD server containing the boot image.",
"type": "integer",
"default": 1024,
"minimum": 1,
"maximum": 65535
},
"virtual-hard-drive-url": {
"title": "Existing disk image URL",
"description": "If specified, will download an existing disk image (qcow2, raw, ...), and will use it as main virtual hard drive. Can be used to download and use an already installed and customized virtual hard drive.",
......@@ -446,84 +432,31 @@
"type": "boolean",
"default": false
},
"boot-image-url-list": {
"title": "Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. After updating the list, the instance has to be restarted to refresh it. Amount of images is limited to 4, and one image can be maximum 20GB. Image will be downloaded and checked against its MD5SUM 4 times, then it will be considered as impossible to download with given MD5SUM. Each image has to be downloaded in time shorter than 4 hours, so in case of very slow images to access, it can take up to 16 hours to download all of them. Note: The instance has to be restarted in order to update the list of available images in the VM. Note: Maximum 3 ISOs are supported.",
"type": "string",
"textarea": true
},
"boot-image-url-select": {
"title": "Boot image",
"type": "array",
"oneOf": [
{
"const": [
"https://shacache.nxdcdn.com/33c08e56c83d13007e4a5511b9bf2c4926c4aa12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb2b131fbde2c677d5258ec9#326b7737c4262e8eb09cd26773f3356a"
],
"title": "Debian Bookworm 12 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/02257c3ec27e45d9f022c181a69b59da67e5c72871cdb4f9a69db323a1fad58093f2e69702d29aa98f5f65e920e0b970d816475a5a936e1f3bf33832257b7e92#b710c178eb434d79ce40ce703d30a5f0"
],
"title": "Debian Bullseye 11 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/ce5ddfdbdaccdf929b7fe321212356347d82a02f6b7733427282b416f113d91e587682b003e9d376ac189c3b731595c50c236962aadf2720c16d9f36913577c0#23bf2a2d60271e553e63525e794415f1"
],
"title": "Centos 8.2004 Minimal x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/3d518612aabbdb77fd6b49cb55b824fed11e40540e4af52f5f26174257715c93740f83079ea618b4d933081f0b1bc69d32b7885b7c75bc90da5ad3fe1814cfd4#c53b2d7c3269c3b91a2d941ceaa8ab9b"
],
"title": "Ubuntu Jammy 24.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/8017c532ed74586b718662d8b11cf8c34fa638b0affd0413ed38623989b8f98ffd0bcb475246e279ea2f3c194a3e33c55e0f376a9727de13e4bfd87e75e47b5d#e8d2a77c51b599c10651608a5d8c286f"
],
"title": "Ubuntu Jammy 22.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/302c990c6d69575ff24c96566e5c7e26bf36908abb0cd546e22687c46fb07bf8dba595bf77a9d4fd9ab63e75c0437c133f35462fd41ea77f6f616140cd0e5e6a#f3a306f40e4a313fb5a584d73b3dee8f"
],
"title": "Ubuntu Focal 20.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6635269a7eb6fbd6b85fda40cd94f14a27bf53cb1fc82ffcce9fe386a025a43e1ab681db7e8cec50416bfbfc90262f0d95273686a101c74b3f17646f0a34c85b#3708a59af6cf820a95cafe0ae73ac399"
],
"title": "openSUSE Leap 15.2 NET x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/fc17e8c6ae0790162f4beb8fa6226d945cff638429588999b3a08493ff27b280dc2939fba825ae04be1d9082ea8d7c3c002c5e4c39fbbcf88b8ab5104619e28a#ebcdb2223a77f098af3923fe1fa180aa"
],
"title": "Arch Linux 2020.09.01 x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/c5a511f349a1146b615e6fab9c24f9be4362046adcf24f0ff82c470d361fac5f6628895e2110ebf8ff87db49d4c413a0a332699da6b1bec64275e0c17a15b999#ca7a1e555c04b4d9a549065fa2ddf713"
],
"title": "Fedora Server 32-1.6 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6c355def68b3c0427f21598cb054ffc893568902f205601ac60f192854769b31bc9cff8eeb6ce99ef975a8fb887d8d3e56fc6cd5ea5cb4b3bba1175c520047cb#57088b77f795ca44b00971e44782ee23"
],
"title": "FreeBSD 12.1 RELEASE bootonly x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/e72e03bbcc4c54ce4b8d5f360b47dab9ee514d754e8d78c403626cf000d6ae98d808b3bcff2201e3cf49c1be1b0f308f1cb5ed81676adcb1837dfc811d2451ac"
],
"title": "SUSE Linux Enterprise Server 15 SP6 x86_64"
}
"type": "string",
"description": "Selectable list of provided ISO images.",
"default": "Debian Bookworm 12 netinst x86_64",
"enum": [
"Debian Bookworm 12 netinst x86_64",
"Debian Bullseye 11 netinst x86_64",
"Centos 8.2004 Minimal x86_64",
"Ubuntu Noble 24.04 Live Server x86_64",
"Ubuntu Jammy 22.04 Live Server x86_64",
"Ubuntu Focal 20.04 Live Server x86_64",
"openSUSE Leap 15 NET x86_64",
"Arch Linux 2020.09.01 x86_64",
"Fedora Server 32 netinst x86_64",
"FreeBSD 12.1 RELEASE bootonly x86_64",
"SUSE Linux Enterprise Server 15 SP6 x86_64"
]
},
"boot-image-url-list": {
"title": "[EXPERT] Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. Maximum images: 4. Maximum image size: 20GB. Download tires: 4. Maximum download time: 4h.",
"type": "string",
"textarea": true
},
"whitelist-domains": {
"title": "Whitelist domains",
"description": "List of whitelisted domain names to be accessed from the VM. They will be resolved to IPs depending on where the VM end up. IPs can be used too.",
......
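A minimal sketch of how a boot-image-url-list value matching the description
above can be assembled: one "url#md5sum" entry per line, at most 4 images. The
URLs below are the hypothetical ones used in the schema's own example:

    import hashlib
    import urllib.request

    def image_entry(url):
        # Download the image and return the "url#md5sum" line expected by the schema.
        digest = hashlib.md5()
        with urllib.request.urlopen(url) as response:
            for chunk in iter(lambda: response.read(1 << 20), b""):
                digest.update(chunk)
        return "%s#%s" % (url, digest.hexdigest())

    boot_image_url_list = "\n".join(image_entry(url) for url in (
        "https://example.com/image.iso",
        "https://example.com/another-image.iso",
    ))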
......@@ -75,8 +75,6 @@ config-name = {{ instance_name }}
{% if slapparameter_dict.get('authorized-keys', []) -%}
config-authorized-key = {{ dumps(slapparameter_dict.get('authorized-keys') | join('\n')) }}
{% endif -%}
config-nbd-port = {{ dumps(kvm_parameter_dict.get('nbd-port', 1024)) }}
config-nbd2-port = {{ dumps(kvm_parameter_dict.get('nbd-port2', 1024)) }}
config-ram-size = {{ dumps(kvm_parameter_dict.get('ram-size', 4096)) }}
config-ram-max-size = {{ dumps(kvm_parameter_dict.get('ram-max-size', int(kvm_parameter_dict.get('ram-size', 4096)) + 512)) }}
config-enable-device-hotplug = {{ dumps(kvm_parameter_dict.get('enable-device-hotplug', False)) }}
......@@ -89,7 +87,6 @@ config-cpu-max-count = {{ dumps(kvm_parameter_dict.get('cpu-max-count', int(kvm_
config-network-adapter = {{ dumps(kvm_parameter_dict.get('network-adapter', 'virtio-net-pci')) }}
{{ setconfig('numa', kvm_parameter_dict.get('numa', '')) }}
{{ setconfig('machine-options', kvm_parameter_dict.get('machine-options', '')) }}
{{ setconfig('nbd-host', kvm_parameter_dict.get('nbd-host', '')) }}
{{ setconfig('host2', kvm_parameter_dict.get('host2', '')) }}
config-auto-ballooning = {{ dumps(kvm_parameter_dict.get('auto-ballooning', True)) }}
......
......@@ -145,33 +145,6 @@
"vmxnet3"
]
},
"nbd-host": {
"title": "NBD hostname",
"description": "hostname (or IP) of the NBD server containing the boot image.",
"type": "string",
"format": "internet-address"
},
"nbd-port": {
"title": "NBD port",
"description": "Port of the NBD server containing the boot image.",
"type": "integer",
"default": 1024,
"minimum": 1,
"maximum": 65535
},
"nbd2-host": {
"title": "Second NBD hostname",
"description": "hostname (or IP) of the second NBD server (containing drivers for example).",
"type": "string",
"format": "internet-address"
},
"nbd2-port": {
"title": "Second NBD port",
"description": "Port of the second NBD server containing the boot image.",
"type": "integer",
"minimum": 1,
"maximum": 65535
},
"virtual-hard-drive-url": {
"title": "Existing disk image URL",
"description": "If specified, will download an existing disk image (qcow2, raw, ...), and will use it as main virtual hard drive. Can be used to download and use an already installed and customized virtual hard drive.",
......@@ -312,84 +285,31 @@
"format": "uri",
"default": "http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg"
},
"boot-image-url-list": {
"title": "Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. After updating the list, the instance has to be restarted to refresh it. Amount of images is limited to 4, and one image can be maximum 20GB. Image will be downloaded and checked against its MD5SUM 4 times, then it will be considered as impossible to download with given MD5SUM. Each image has to be downloaded in time shorter than 4 hours, so in case of very slow images to access, it can take up to 16 hours to download all of them. Note: The instance has to be restarted in order to update the list of available images in the VM. Note: Maximum 3 ISOs are supported.",
"type": "string",
"textarea": true
},
"boot-image-url-select": {
"title": "Boot image",
"type": "array",
"oneOf": [
{
"const": [
"https://shacache.nxdcdn.com/33c08e56c83d13007e4a5511b9bf2c4926c4aa12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb2b131fbde2c677d5258ec9#326b7737c4262e8eb09cd26773f3356a"
],
"title": "Debian Bookworm 12 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/02257c3ec27e45d9f022c181a69b59da67e5c72871cdb4f9a69db323a1fad58093f2e69702d29aa98f5f65e920e0b970d816475a5a936e1f3bf33832257b7e92#b710c178eb434d79ce40ce703d30a5f0"
],
"title": "Debian Bullseye 11 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/ce5ddfdbdaccdf929b7fe321212356347d82a02f6b7733427282b416f113d91e587682b003e9d376ac189c3b731595c50c236962aadf2720c16d9f36913577c0#23bf2a2d60271e553e63525e794415f1"
],
"title": "Centos 8.2004 Minimal x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/3d518612aabbdb77fd6b49cb55b824fed11e40540e4af52f5f26174257715c93740f83079ea618b4d933081f0b1bc69d32b7885b7c75bc90da5ad3fe1814cfd4#c53b2d7c3269c3b91a2d941ceaa8ab9b"
],
"title": "Ubuntu Jammy 24.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/8017c532ed74586b718662d8b11cf8c34fa638b0affd0413ed38623989b8f98ffd0bcb475246e279ea2f3c194a3e33c55e0f376a9727de13e4bfd87e75e47b5d#e8d2a77c51b599c10651608a5d8c286f"
],
"title": "Ubuntu Jammy 22.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/302c990c6d69575ff24c96566e5c7e26bf36908abb0cd546e22687c46fb07bf8dba595bf77a9d4fd9ab63e75c0437c133f35462fd41ea77f6f616140cd0e5e6a#f3a306f40e4a313fb5a584d73b3dee8f"
],
"title": "Ubuntu Focal 20.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6635269a7eb6fbd6b85fda40cd94f14a27bf53cb1fc82ffcce9fe386a025a43e1ab681db7e8cec50416bfbfc90262f0d95273686a101c74b3f17646f0a34c85b#3708a59af6cf820a95cafe0ae73ac399"
],
"title": "openSUSE Leap 15.2 NET x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/fc17e8c6ae0790162f4beb8fa6226d945cff638429588999b3a08493ff27b280dc2939fba825ae04be1d9082ea8d7c3c002c5e4c39fbbcf88b8ab5104619e28a#ebcdb2223a77f098af3923fe1fa180aa"
],
"title": "Arch Linux 2020.09.01 x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/c5a511f349a1146b615e6fab9c24f9be4362046adcf24f0ff82c470d361fac5f6628895e2110ebf8ff87db49d4c413a0a332699da6b1bec64275e0c17a15b999#ca7a1e555c04b4d9a549065fa2ddf713"
],
"title": "Fedora Server 32-1.6 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6c355def68b3c0427f21598cb054ffc893568902f205601ac60f192854769b31bc9cff8eeb6ce99ef975a8fb887d8d3e56fc6cd5ea5cb4b3bba1175c520047cb#57088b77f795ca44b00971e44782ee23"
],
"title": "FreeBSD 12.1 RELEASE bootonly x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/e72e03bbcc4c54ce4b8d5f360b47dab9ee514d754e8d78c403626cf000d6ae98d808b3bcff2201e3cf49c1be1b0f308f1cb5ed81676adcb1837dfc811d2451ac"
],
"title": "SUSE Linux Enterprise Server 15 SP6 x86_64"
}
"description": "Selectable list of provided ISO images.",
"type": "string",
"default": "Debian Bookworm 12 netinst x86_64",
"enum": [
"Debian Bookworm 12 netinst x86_64",
"Debian Bullseye 11 netinst x86_64",
"Centos 8.2004 Minimal x86_64",
"Ubuntu Noble 24.04 Live Server x86_64",
"Ubuntu Jammy 22.04 Live Server x86_64",
"Ubuntu Focal 20.04 Live Server x86_64",
"openSUSE Leap 15 NET x86_64",
"Arch Linux 2020.09.01 x86_64",
"Fedora Server 32 netinst x86_64",
"FreeBSD 12.1 RELEASE bootonly x86_64",
"SUSE Linux Enterprise Server 15 SP6 x86_64"
]
},
"boot-image-url-list": {
"title": "[EXPERT] Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. Maximum images: 4. Maximum image size: 20GB. Download tires: 4. Maximum ownload time: 4h.",
"type": "string",
"textarea": true
},
"whitelist-domains": {
"title": "Whitelist domains",
"description": "List of whitelisted domain names to be accessed from the VM. They will be resolved to IPs depending on where the VM end up. IPs can be used too.",
......
{
"type": "object",
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Input Parameters For NDB Server",
"properties": {}
}
{
"$schema": "http://json-schema.org/draft-07/schema#",
"name": "Output Parameters",
"properties": {
"nbd_url": {
"title": "NBD server URL",
"description": "URL to be used to boot another VM. Requires IPv6. IPv6 should be used as \"NBD hostname\" and port as \"NBD port\"",
"type": "string",
"format": "uri"
},
"upload_url": {
"title": "Upload URL",
"description": "URL used to upload your VM image.",
"type": "string",
"format": "uri"
},
"upload_key": {
"title": "Upload key",
"description": "Key used to upload your VM image.",
"type": "string",
"format": "uri"
}
}
}
......@@ -19,8 +19,11 @@
{% set whitelist_domains = slapparameter_dict.get('whitelist-domains', '') -%}
{% set virtual_hard_drive_url_enabled = 'virtual-hard-drive-url' in slapparameter_dict %}
{% set virtual_hard_drive_url_gzipped = slapparameter_dict.get('virtual-hard-drive-gzipped', False) %}
{% set boot_image_url_list_enabled = 'boot-image-url-list' in slapparameter_dict %}
{% set boot_image_url_select_enabled = 'boot-image-url-select' in slapparameter_dict %}
{% if 'boot-image-url-select' not in slapparameter_dict and ('boot-image-url-list' in slapparameter_dict or 'nbd-host' in slapparameter_dict or 'nbd2-host' in slapparameter_dict or virtual_hard_drive_url_enabled)%}
{% set boot_image_url_select_default = '' %}
{% else %}
{% set boot_image_url_select_default = 'Debian Bookworm 12 netinst x86_64' %}
{% endif %}
{% set bootstrap_script_url = slapparameter_dict.get('bootstrap-script-url') -%}
{% set cpu_max_count = dumps(slapparameter_dict.get('cpu-max-count', int(slapparameter_dict.get('cpu-count', 2)) + 1)) %}
{% set ram_max_size = dumps(slapparameter_dict.get('ram-max-size', int(slapparameter_dict.get('ram-size', 4096)) + 512)) %}
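The conditional above only provides a fallback: an explicit
boot-image-url-select always takes precedence, and the Debian Bookworm image
is used as the default unless the request already carries another boot source
(boot-image-url-list, nbd-host, nbd2-host or virtual-hard-drive-url). A hedged
Python restatement of the same rule:

    def boot_image_url_select_default(slapparameter_dict):
        # Mirrors the Jinja2 block above; value used when no image is picked explicitly.
        other_sources = ('boot-image-url-list', 'nbd-host', 'nbd2-host',
                         'virtual-hard-drive-url')
        if ('boot-image-url-select' not in slapparameter_dict
                and any(key in slapparameter_dict for key in other_sources)):
            return ''
        return 'Debian Bookworm 12 netinst x86_64'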
......@@ -63,16 +66,12 @@ virtual-hard-drive-url-repository = ${:srv}/virtual-hard-drive-url-repository
virtual-hard-drive-url-var = ${:var}/virtual-hard-drive-url
virtual-hard-drive-url-expose = ${monitor-directory:private}/virtual-hard-drive-url
{%- endif %}
{%- if boot_image_url_list_enabled %}
boot-image-url-list-repository = ${:srv}/boot-image-url-list-repository
boot-image-url-list-var = ${:var}/boot-image-url-list
boot-image-url-list-expose = ${monitor-directory:private}/boot-image-url-list
{%- endif %}
{%- if boot_image_url_select_enabled %}
boot-image-url-select-repository = ${:srv}/boot-image-url-select-repository
boot-image-url-select-var = ${:var}/boot-image-url-select
boot-image-url-select-expose = ${monitor-directory:private}/boot-image-url-select
{%- endif %}
[create-mac]
recipe = slapos.cookbook:generate.mac
......@@ -88,7 +87,6 @@ storage-path = ${directory:srv}/.passwd
# VNC protocol supports passwords of 8 characters max
bytes = 8
{% if boot_image_url_select_enabled %}
## boot-image-url-select support BEGIN
[empty-file-state-base-select-promise]
<= monitor-promise-base
......@@ -99,13 +97,12 @@ config-url = ${monitor-base:base-url}/private/boot-image-url-select/${:filename}
[boot-image-url-select-source-config]
recipe = slapos.recipe.template:jinja2
inline =
{%- raw %}
{{ boot_image_url_select }}
{% endraw -%}
boot-image-url-select = {{ dumps(slapparameter_dict['boot-image-url-select']) }}
url = {{ boot_image_select_source_config }}
boot-image-url-select = {{ dumps(slapparameter_dict.get('boot-image-url-select', '')) }}
boot-image-url-select-default = {{ dumps(boot_image_url_select_default) }}
context =
key boot_image_url_select :boot-image-url-select
key boot_image_url_select_default :boot-image-url-select-default
output = ${directory:etc}/boot-image-url-select.json
[boot-image-url-select-processed-config]
......@@ -183,9 +180,7 @@ config-filename = ${boot-image-url-select-download-wrapper:md5sum-state-file}
filename = ${boot-image-url-select-download-wrapper:error-state-filename}
config-filename = ${boot-image-url-select-download-wrapper:error-state-file}
## boot-image-url-select support END
{% endif %} {# if boot_image_url_select_enabled #}
{% if boot_image_url_list_enabled %}
## boot-image-url-list support BEGIN
[empty-file-state-base-list-promise]
<= monitor-promise-base
......@@ -200,7 +195,7 @@ inline =
{%- raw %}
{{ boot_image_url_list }}
{% endraw -%}
boot-image-url-list = {{ dumps(slapparameter_dict['boot-image-url-list']) }}
boot-image-url-list = {{ dumps(slapparameter_dict.get('boot-image-url-list', '')) }}
context =
key boot_image_url_list :boot-image-url-list
output = ${directory:etc}/boot-image-url-list.conf
......@@ -280,7 +275,6 @@ config-filename = ${boot-image-url-list-download-wrapper:md5sum-state-file}
filename = ${boot-image-url-list-download-wrapper:error-state-filename}
config-filename = ${boot-image-url-list-download-wrapper:error-state-file}
## boot-image-url-list support END
{% endif %} {# if boot_image_url_list_enabled #}
{% if virtual_hard_drive_url_enabled %}
## virtual-hard-drive-url support BEGIN
......@@ -399,26 +393,13 @@ ipv6 = ${slap-network-information:global-ipv6}
vnc-ip = ${:ipv4}
vnc-websocket-port = 5701
default-cdrom-iso = {{ debian_amd64_netinst_location }}
{% if virtual_hard_drive_url_enabled %}
virtual-hard-drive-url-json-config = ${virtual-hard-drive-url-json-config:output}
{% else %}
virtual-hard-drive-url-json-config =
{% endif %}
{% if boot_image_url_list_enabled %}
boot-image-url-list-json-config = ${boot-image-url-list-json-config:output}
{% else %}
boot-image-url-list-json-config =
{% endif %}
{% if boot_image_url_select_enabled %}
boot-image-url-select-json-config = ${boot-image-url-select-json-config:output}
{% else %}
boot-image-url-select-json-config =
{% endif %}
nbd-host = ${slap-parameter:nbd-host}
nbd-port = ${slap-parameter:nbd-port}
nbd2-host = ${slap-parameter:nbd2-host}
nbd2-port = ${slap-parameter:nbd2-port}
tap-interface = {{ slap_configuration.get('tap-name', '') }}
tap-ipv6-addr = {{ slap_configuration.get('tap-ipv6-addr', '') }}
......@@ -549,12 +530,8 @@ update-command = ${:command}
command = [ ! -f {{ '${' + key + '}' }} ] && touch {{ '${' + key + '}' }}
{%- endmacro %}
{#- Create depending sections, as state files appear late, so it's better to have empty file which will impact the hash anyway #}
{%- if boot_image_url_list_enabled %}
{{ generate_depend_section('boot-image-url-list-depend', 'boot-image-url-list-download-wrapper:config') }}
{%- endif %}
{%- if boot_image_url_select_enabled %}
{{ generate_depend_section('boot-image-url-select-depend', 'boot-image-url-select-download-wrapper:config') }}
{%- endif %}
{%- if virtual_hard_drive_url_enabled %}
{{ generate_depend_section('virtual-hard-drive-url-depend', 'virtual-hard-drive-url-download-wrapper:config') }}
{%- endif %}
......@@ -1085,10 +1062,6 @@ frontend-additional-software-type = default
frontend-additional-software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
frontend-additional-instance-guid =
frontend-additional-instance-name = VNC Real Frontend Additional
nbd-port = 1024
nbd-host =
nbd2-port = 1024
nbd2-host =
boot-image-url-list =
enable-device-hotplug = False
......@@ -1285,20 +1258,16 @@ parts =
virtual-hard-drive-url-download-state-promise
virtual-hard-drive-url-processed-config-promise
{% endif %}
{% if boot_image_url_list_enabled %}
boot-image-url-list-download-wrapper
boot-image-url-list-config-state-promise
boot-image-url-list-download-md5sum-promise
boot-image-url-list-download-state-promise
boot-image-url-list-processed-config-promise
{% endif %}
{% if boot_image_url_select_enabled %}
boot-image-url-select-download-wrapper
boot-image-url-select-config-state-promise
boot-image-url-select-download-md5sum-promise
boot-image-url-select-download-state-promise
boot-image-url-select-processed-config-promise
{% endif %}
{% if additional_frontend %}
frontend-additional-promise
{% endif %}
......
#############################
#
# Instantiate nbdserver
#
#############################
[buildout]
parts =
nbd-promise
onetimeupload-promise
publish-connection-information
extends = {{ template_monitor }}
{% set ipv6 = slap_configuration['ipv6-random'] -%}
[rootdirectory]
recipe = slapos.cookbook:mkdirectory
bin = ${buildout:directory}/bin
etc = ${buildout:directory}/etc
srv = ${buildout:directory}/srv
log = ${buildout:directory}/log
[basedirectory]
recipe = slapos.cookbook:mkdirectory
services = ${rootdirectory:etc}/run
watched-services = ${rootdirectory:etc}/service
[nbd-instance]
recipe = slapos.cookbook:nbdserver
ip = {{ ipv6 }}
port = 1024
image-path = ${onetimeupload-instance:image-path}
qemu-path = {{ qemu_nbd_executable_location }}
shell-path = {{ dash_executable_location }}
# XXX TODO: Wait for the iso to be uploaded (execute_wait)
path = ${basedirectory:services}/nbdserver
[nbd-checker-bin]
recipe = slapos.recipe.template
inline =
#!/bin/sh
[ ! -f ${onetimeupload-instance:image-path} ] ||
${buildout:executable} -c 'import socket ; socket.create_connection(("${nbd-instance:ip}","${nbd-instance:port}")).close()'
output = ${rootdirectory:bin}/check-nbd-running.sh
[nbd-promise]
<= monitor-promise-base
promise = check_command_execute
name = nbd_promise.py
config-command = ${nbd-checker-bin:output}
[gen-passwd]
recipe = slapos.cookbook:generate.password
storage-path = ${rootdirectory:srv}/passwd
bytes = 24
[onetimeupload-instance]
recipe = slapos.cookbook:onetimeupload
ip = {{ ipv6 }}
port = {{ slapparameter_dict.get('otu-port', 8080) }}
image-path = ${rootdirectory:srv}/cdrom.iso
log-path = ${rootdirectory:log}/onetimeupload.log
shell-path = {{ dash_executable_location }}
onetimeupload-path = {{ onetimeupload_executable_location }}
path = ${basedirectory:watched-services}/onetimeupload
key = ${gen-passwd:passwd}
[onetimeupload-promise]
<= monitor-promise-base
promise = check_socket_listening
name = onetimeupload_promise.py
config-host = ${onetimeupload-instance:ip}
config-port = ${onetimeupload-instance:port}
[publish-connection-information]
recipe = slapos.cookbook:publish
nbd_hostname = ${nbd-instance:ip}
nbd_port = ${nbd-instance:port}
upload_url = http://[${onetimeupload-instance:ip}]:${onetimeupload-instance:port}
upload_key = ${onetimeupload-instance:key}
status_message = ${detect-if-cdrom-present:status}
[detect-if-cdrom-present]
recipe = slapos.recipe.build
init =
import os
options['status'] = (
"image already uploaded, you can't upload it again"
if os.path.isfile("${onetimeupload-instance:image-path}")
else "WARNING: no image yet, the NBD server doesn't work")
......@@ -12,7 +12,6 @@ recipe = slapos.cookbook:switch-softwaretype
default = $${:kvm}
kvm-cluster = dynamic-template-kvm-cluster:output
kvm = dynamic-template-kvm:output
nbd = dynamic-template-nbd:output
kvm-resilient = dynamic-template-kvm-resilient:output
kvm-import = dynamic-template-kvm-import:output
......@@ -78,7 +77,7 @@ extra-context =
raw dash_executable_location ${dash:location}/bin/dash
raw dnsresolver_executable ${buildout:bin-directory}/dnsresolver
raw dcron_executable_location ${dcron:location}/sbin/crond
raw debian_amd64_netinst_location ${debian-amd64-bullseye-netinst.iso:target}
raw boot_image_select_source_config ${boot-image-select-source-config:target}
raw whitelist_domains_default ${whitelist-domains-default:target}
raw whitelist_firewall_download_controller ${whitelist-firewall-download-controller:output}
raw image_download_controller ${image-download-controller:output}
......@@ -151,17 +150,3 @@ context =
key slapparameter_dict slap-configuration:configuration
raw zcat_binary ${gzip:location}/bin/zcat
raw gzip_binary ${gzip:location}/bin/gzip
[dynamic-template-nbd]
<= jinja2-template-base
url = ${template-nbd:location}/instance-nbd.cfg.jinja2
filename = template-nbd.cfg
context =
section slap_configuration slap-configuration
key slapparameter_dict slap-configuration:configuration
key eggs_directory buildout:eggs-directory
key develop_eggs_directory buildout:develop-eggs-directory
raw qemu_nbd_executable_location ${qemu:location}/bin/qemu-nbd
raw dash_executable_location ${dash:location}/bin/dash
raw onetimeupload_executable_location ${buildout:bin-directory}/onetimeupload
raw template_monitor ${monitor2-template:output}