Commit d181e4f5 by Julien Muchembled

re6st-node: reimplement in Python the part to build files to send to OBS

Makefile was so horrible and unreliable.
I tried waf, which is nice but not suitable for this
(see the example 'wscript' at the end).

Functional improvements:
- better detection of what needs to be rebuilt or not
- reproducible tarballs, except for the re6stnet egg
  (and the main tarball if the egg is rebuilt)
- fewer temporary files

And support for OSC is back.


import os, shutil, subprocess, urllib
from waflib import Node, Utils

PREFIX = "opt/re6st"  # install prefix used inside the build tree
# Mapping of repository name -> git URL, consumed by configure() below.
# NOTE(review): the dict(...) literal is truncated in this capture.
repo_dict = dict(

def configure(ctx):
    """Clone every repository from repo_dict that is not yet checked out
    next to the wscript."""
    for name in repo_dict:
        # Skip repositories that already exist in the source directory.
        if ctx.path.find_node(name) is not None:
            continue
        url = repo_dict[name]
        ctx.exec_command(("git", "clone", url), stdout=None, stderr=None)

def cfg(task):
    # Recipe: generate buildout.cfg from the template input by %-interpolation.
    o, = task.outputs  # the produced buildout.cfg node
    bld = task.generator.bld
    # ROOT points from the buildout directory back into the build tree.
    # NOTE(review): the dict(...) argument list is truncated in this capture.
    o.write(task.inputs[0].read() % dict(
        ROOT="${buildout:directory}/" + bld.bldnode.path_from(o.parent),

def bootstrap(task):
    # Recipe: bootstrap zc.buildout and build its 'python' part, producing
    # bin/buildout and bin/python under the buildout root.
    b = task.outputs[0]  # bin/buildout node
    d = b.parent.parent  # buildout root directory
    # BOOTSTRAP_URL is defined elsewhere in the original file (not visible here).
    bootstrap = urllib.urlopen(BOOTSTRAP_URL).read()
    for cache in "download-cache", "extends-cache":
        cache = d.make_node(cache).abspath()
        # NOTE(review): loop body truncated in this capture — presumably it
        # reuses/links an existing cache directory; confirm against upstream.
        if os.path.exists(cache):
    cwd = d.abspath()
    # Feed bootstrap.py to 'python2 -S' on stdin, then run buildout for the
    # 'python' part only.
    task.exec_command(("python2", "-S"), input=bootstrap, cwd=cwd)
    task.exec_command((b.abspath(), "buildout:parts=python"), cwd=cwd)

def sdist(task):
    # Recipe: build the re6stnet source distribution into the download cache.
    r, p = task.inputs  # r: re6stnet source node, p: bin/python node
    d = p.find_node("../../download-cache/dist")
    # Drop previously built re6stnet tarballs first.
    # NOTE(review): the loop body is truncated in this capture.
    for x in d.ant_glob("re6stnet-*", quiet=True):
    # NOTE(review): the empty "" argument and the trailing keyword arguments
    # are garbled/truncated (the "" was presumably "setup.py").
    task.exec_command((p.abspath(), "", "sdist", "-d", d.abspath()),

def build(bld):
    # Waf build entry point: chain cfg -> bootstrap -> sdist -> buildout
    # task generators under PREFIX inside the build tree.
    # NOTE(review): several literals below are truncated in this capture
    # (empty source="" strings, the 'buildout, python =' assignment, and
    # two unterminated tuples).
    b = bld.bldnode.make_node(PREFIX)
    buildout_cfg = b.make_node("buildout.cfg")
    bld(source="", target=buildout_cfg, rule=cfg)
    tg = bld(source=(buildout_cfg, "slapos"), rule=bootstrap,
             target=map(b.make_node, ("bin/buildout", "bin/python")))
    buildout, python =
    r = bld.path.find_node("re6stnet")
    tg = bld(source=(r, python), rule=sdist, update_outputs=True,
    # Final task generator: run bin/buildout in the buildout directory.
    bld(name="buildout", source=(buildout_cfg,,
        rule=lambda task: task.exec_command((buildout.abspath(),),
            stdout=None, stderr=None, cwd=b.abspath()))


def h_file(fname, h_file=Utils.h_file):
    # Replacement for waf's Utils.h_file that can also hash a directory:
    # the digest covers relative paths plus per-file hashes, pruning .git
    # and *.egg-info subtrees so VCS/build noise does not trigger rebuilds.
    if os.path.isdir(fname):
        m = Utils.md5(fname)
        n = len(fname) + 1  # length of the "<root>/" prefix to strip
        for dirpath, dirs, files in os.walk(fname):
            if dirpath.endswith(("/.git", ".egg-info")):
                del dirs[:]  # prune: do not descend further
            m.update(dirpath[n:] + '\0')
            for fname in files:
                # NOTE(review): the second %-argument and the closing parens
                # are truncated in this capture.
                m.update("%s\0%s\0" % (h_file(os.path.join(dirpath, fname)),
        return m.digest()
    # Regular file: defer to the original waf implementation (bound default).
    return h_file(fname)
Utils.h_file = h_file  # monkey-patch waf's file-hashing hook

def find_resource(self, lst):
    """Locate a node by path, searching the build tree before falling back
    to the source tree."""
    if isinstance(lst, str):
        parts = Node.split_path(lst)
        lst = [part for part in parts if part and part != '.']
    built = self.get_bld().search_node(lst)
    return built or self.get_src().find_node(lst)
Node.Node.find_resource = find_resource  # monkey-patch waf's node lookup
1 parent 0fa14b78
preparing the package
Building the files to be sent to OBS
First make sure all files are ready and you have all necessary packages installed.
You need in particular an OBS directory (for example, the vifib test directory)::
$ cd <directory_to_contain_prepare_script>
$ osc checkout home:VIFIBnexedi:branches:home:VIFIBnexedi/Re6stnet
$ cd home:VIFIBnexedi:branches:home:VIFIBnexedi/Re6stnet
$ osc up
$ ./make
All output files are in the 'dist' folder, which is created automatically.
Upload to OBS
For this, you need a checkout of the OBS repository, and make an 'osc' symlink
pointing to it. For example, the vifib test directory::
A$ cd <where_you_want>
B$ osc checkout home:VIFIBnexedi:branches:home:VIFIBnexedi/Re6stnet
B$ cd home:VIFIBnexedi:branches:home:VIFIBnexedi/Re6stnet
B$ osc up
B$ cd A
A$ ln -s B/home:VIFIBnexedi:branches:home:VIFIBnexedi/Re6stnet osc
And whenever you want to push updates::
A$ ./make osc
A$ (cd osc; osc commit)
Warning about SlapOS updates
When a SlapOS update would add new files to download-cache or extends-cache,
everything should be rebuilt by deleting the 'build' folder, in order to remove
unused files from the caches.
#!/usr/bin/env python
# Copyright (C) 2016 Julien Muchembled <>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <>.
Very basic Python implementation of a build system similar to the well-known
'make'. The main concept is the same:
input files --( recipe )-> output files
with comparison of timestamps to trigger recipes
(i.e. no temporary files generated to track the status of the build).
The main differences are:
- no parallelism
- no implicit rule, no predefined rule
+ inputs can be whole directories
+ change time of inputs is also taken into account
+ multiple list of outputs for a recipe
+ dynamic list of outputs
+ no need to care about wrongly/partially written files in case of failure
+ Python...
import argparse, atexit, errno, gzip, os, shutil, subprocess, sys, tarfile
from contextlib import contextmanager
INF = float("inf")  # sentinel timestamp: "always up to date"
_INF = - INF        # sentinel timestamp: "never built / oldest possible"
# NOTE(review): indentation (and probably decorators such as @property on
# 'output' and @staticmethod on '_time') was stripped from the classes below
# by the capture; code lines are kept verbatim.
class _task(object):
# Base node of the build graph: holds a tuple of output paths and a _run()
# hook returning the newest timestamp among them.
def __init__(self, *src):
self.outputs = src
def __str__(self):
o = self.outputs
return repr(o[0]) if len(o) == 1 else str(o)
def output(self):
# Shortcut valid only when there is exactly one output path.
o, = self.outputs
return o
def __call__(self, dry_run):
# Memoize: once _run returns a non-callable (a timestamp), it replaces
# itself so repeated evaluation is free.
run = self._run
if callable(run):
self._run = run = run(dry_run)
return run
def _time(path):
# Timestamp of a single path (plain mtime).
return os.stat(path).st_mtime
def _run(self, dry_run=None):
# Newest timestamp among outputs; -inf when there is none.
x = self.outputs
return max(map(self._time, x)) if x else _INF
class files(_task):
# Plain file inputs. By default the timestamp also accounts for st_ctime
# (so permission/ownership changes count); pass ctime=False to disable.
def __init__(self, *src, **kw):
if not kw.pop("ctime", True):
self._time = _task._time  # fall back to pure mtime
_task.__init__(self, *src, **kw)
def _time(path):
# NOTE(review): probably a @staticmethod originally (decorator stripped).
s = os.stat(path)
return max(s.st_ctime, s.st_mtime)
class tree(files):
# A whole directory used as an input, walked recursively.
# NOTE(review): the next line was the class docstring (quotes stripped).
e.g. @task(tree("some/folder"), "some/output")
def __init__(self, root, ignore=None, **kw):
# ignore: optional callable taking a path relative to root, returning
# truth to skip that entry (and, for directories, its whole subtree).
files.__init__(self, root, **kw)
self.ignore = ignore
def __iter__(self):
# Yield the root and every non-ignored path below it.
root, = self.outputs
yield root
ignore = self.ignore
n = len(root) + 1  # strip the "<root>/" prefix for ignore() checks
for dirpath, dirs, files in os.walk(root):
dirpath += os.sep
if ignore:
x = dirpath[n:]
# Prune ignored directories in place so os.walk skips them.
dirs[:] = (name for name in dirs if not ignore(x + name))
for name in dirs:
yield dirpath + name
for name in files:
x = dirpath + name
if not (ignore and ignore(x[n:])):
yield x
def _run(self, dry_run=None):
# Newest timestamp over the whole tree.
return max(map(self._time, self))
class task(_task):
# NOTE(review): the prose below was the class docstring (quotes stripped);
# several statements in this class are garbled or truncated by the capture
# (missing 'try:' lines, empty bodies, the '@classmethod' decorator that
# presumably preceded _revert, and the assignment on the __init__ line).
@task(depends, provides)
def mytask(task):
# task.inputs
# task.outputs
'depends' is a sequence of other tasks or file paths. The order is important
because it defines the order of task.inputs: note however that paths are
automatically moved after tasks.
'provides' is a sequence of callables or file paths. Callables are always
called to complete task.outputs. Recipe is always called if task.outputs
is empty.
For both 'depends' and 'provides', if you have only 1 element, you can pass
it directly instead of making a 1-size sequence.
'input' and 'output' properties are shortcut to get the only path.
'why' is a list of tasks explaining why the recipe is called.
def __new__(cls, *args, **kw):
# Decorator factory: @task(deps, provides) wraps the recipe function.
def task_gen(func):
self = _task.__new__(cls)
self.__init__(func, *args, **kw)
return self
return task_gen
def __init__(self, run, depends, provides=(),
__str_or_task = (basestring, _task)): = run
# NOTE(review): the line above is garbled; an assignment storing 'run'
# (the recipe) on self presumably followed the signature.
self.depends = []
f = []
# Plain path strings are grouped into one files() dependency; task
# objects keep their own place in self.depends.
for x in (depends,) if isinstance(depends, __str_or_task) else depends:
(f if isinstance(x, basestring) else self.depends).append(x)
f and self.depends.append(files(*f))
# Normalize 'provides' into a sequence.
self.provides = ((provides,)
if isinstance(provides, basestring) or callable(provides)
else provides)
self.why = self,
def __str__(self):
def input(self):
# Shortcut valid only when there is exactly one input path.
i, = self.inputs
return i
def _run(self, dry_run, _otime=INF):
# Evaluate dependencies, decide whether the recipe must run (self.why),
# optionally run it, and return the resulting output timestamp.
self.inputs = x = []
deps = []
for dep in self.depends:
deps.append((dep, dep(dry_run)))
x += dep.outputs
self.outputs = x = []
# Callables in 'provides' compute outputs dynamically.
for p in self.provides:
if callable(p):
x += p(self)
if None not in x:
# NOTE(review): a 'try:' line around the statement below is missing
# from this capture — a nonexistent output (ENOENT) is tolerated.
_otime = _task._run(self)
except OSError as e:
if e.errno != errno.ENOENT:
if deps:
# Rebuild when any input is newer than the (oldest) output time.
self.why = [dep for dep, itime in deps if _otime < itime]
if self.why:
print "# Processing %s: %s -> %s" % (self,
", ".join(map(str, self.depends)),
", ".join("<%s>" % x.__name__ if callable(x) else x
for x in self.provides or "?"))
if not dry_run:
# NOTE(review): the call invoking the recipe appears truncated here.
if _otime is INF:
_otime = max(x[1] for x in deps) - 1
# Files may still be open due to references in tracebacks.
# Make sure they're all closed before reverting mtimes.
atexit.register(self._revert, self.outputs, _otime)
return _task._run(self) if self.outputs else INF
return _otime
def _revert(cls, outputs, mtime):
# On failure, push output mtimes back so wrongly/partially written files
# do not look up to date on the next run (best effort).
for x in outputs:
# NOTE(review): a 'try:' line is missing from this capture.
if mtime < cls._time(x):
os.utime(x, (mtime, mtime))
except Exception:
def main():
# Command-line entry point: parse options, exec the user's build script in
# a fresh namespace, then process the requested tasks.
parser = argparse.ArgumentParser()
_ = parser.add_argument
# NOTE(review): the default file name and part of the help string are
# truncated in this capture.
_("-f", "--file", type=argparse.FileType("r"), default="",
help="Python script describing how to build the project"
" (default:")
_("-l", "--list", action="store_true",
help="List defined tasks.")
_("-n", "--dry-run", action="store_true",
help="Print the tasks that would be executed.")
_("task", nargs="*", default=("build",),
help="Tasks to process (default: build).")
args = parser.parse_args()
# Re-register this module as 'make' so build scripts can 'import make'.
sys.modules["make"] = sys.modules.pop(__name__)
f = args.file
# NOTE(review): the dict value and the compile() arguments below are
# garbled — presumably f.read() compiled with f.name as the filename.
tasks = {"__file__":}
exec(compile(,, "exec"), tasks)
if args.list:
print " ".join(sorted(k for k, v in tasks.iteritems()
if isinstance(v, _task)))
# Validate every requested task name before running any of them.
for t in args.task:
if not isinstance(tasks.get(t), _task):
sys.exit("%s is not a valid task." % t)
# NOTE(review): the processing loop body is truncated in this capture.
for t in args.task:
# Helpers
class git(tree):
    """A tree input that clones itself with git when missing on disk."""
    def __init__(self, root, url=None, ignore=None, **kw):
        # Always ignore the ".git" metadata directory, on top of any
        # caller-supplied ignore predicate.
        skip = lambda x: x == ".git" or ignore and ignore(x)
        tree.__init__(self, root, skip, **kw)
        self.url = url
    def _run(self, dry_run):
        root, = self.outputs
        if not os.path.isdir(root):
            # Checkout is absent: clone it (unless dry-running) and report
            # the oldest possible timestamp.
            if not dry_run:
                subprocess.check_call(("git", "clone", self.url, root))
            return _INF
        return tree._run(self)
def check_output(*args, **kw):
    """Run a command and return its captured stdout.

    Raises subprocess.CalledProcessError when the command exits non-zero.
    BBB: the 'input' keyword argument is a backport from Python 3.4.
    """
    data = kw.pop("input", None)
    if data is not None:
        kw["stdin"] = subprocess.PIPE
    proc = subprocess.Popen(stdout=subprocess.PIPE, *args, **kw)
    out = proc.communicate(data)[0]
    if not proc.returncode:
        return out
    raise subprocess.CalledProcessError(
        proc.returncode, args[0] if args else kw["args"])
def cwd(path):
# Context manager that yields the previous working directory.
# NOTE(review): the @contextmanager decorator, the os.chdir(path) call and
# the try/finally restoring the old directory are missing from this capture.
p = os.getcwd()
yield p
def mkdir(path):
# Create a directory, tolerating EEXIST when it is already a directory.
# NOTE(review): the 'try:'/os.makedirs line and the final 'raise' are
# truncated in this capture.
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
def remove(path):
# Remove a file, tolerating ENOENT (already gone).
# NOTE(review): the 'try:'/removal line and the final 'raise' are truncated
# in this capture.
except OSError as e:
if e.errno != errno.ENOENT:
def rmtree(path):
# Recursively delete 'path' when it exists.
# NOTE(review): the deletion call (presumably shutil.rmtree) is truncated
# in this capture.
if os.path.exists(path):
def make_tar_gz(path, mtime, xform=(lambda x: x), **kw):
# Yield a gzip-compressed tar archive whose output is deterministic: member
# mtimes are pinned, directory listings are sorted, and (on 2.7+) the gzip
# header mtime is forced.
# NOTE(review): the @contextmanager decorator and a try/finally around the
# yield (restoring the monkey-patches) are missing from this capture.
# Make reproducible tarball. Otherwise, it's really annoying that we can't
# rely on 'osc status' to know whether there are real changes or not.
# BBB: Results differ between Python 2.6 and 2.7 because of tarfile.
__init__ = gzip.GzipFile.__init__
listdir = os.listdir
if sys.version_info >= (2, 7):
# 2.7+ GzipFile accepts mtime=: force it so the gzip header is stable.
gzip.GzipFile.__init__ = lambda *args: __init__(mtime=mtime, *args)
# Sort directory listings so tar member order is deterministic.
os.listdir = lambda path: sorted(listdir(path))
# NOTE(review): the line below is garbled — presumably tarfile.open(path, ...).
t =, "w:gz", **kw)
_gettarinfo = t.gettarinfo
def gettarinfo(name=None, arcname=None, fileobj=None):
# Pin a constant mtime and apply the name transform on every member.
tarinfo = _gettarinfo(name, xform(arcname or name), fileobj)
tarinfo.mtime = mtime
return tarinfo
t.gettarinfo = gettarinfo
yield t
# Restore the monkey-patches once the with-block body is done.
gzip.GzipFile.__init__ = __init__
os.listdir = listdir
if sys.version_info < (2, 7):
# 2.6 GzipFile has no mtime parameter: patch the gzip header in place.
# NOTE(review): a seek to the gzip MTIME field offset appears to be
# missing/truncated here.
with open(path, "r+") as t:
gzip.write32u(t, long(mtime))
if __name__ == "__main__":
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!