runner-import.sh.jinja2 7.19 KB
Newer Older
1
#!{{ shell_binary }}

# Force a reproducible, byte-wise locale for every tool invoked below.
LC_ALL=C
export LC_ALL

# Backup data is private: restrict everything we create to the owner.
umask 077

# Exit on any error, to prevent inconsistent backup
# Error on unset variable expansion
set -eu

# Redirect output to log
# NOTE(review): >(...) process substitution requires bash — shell_binary
# must point to a bash interpreter; confirm against the recipe.
exec > >(tee -ai '{{ output_log_file }}')
exec 2>&1

echo -e "\n\n$0 run at : $(date)"

srv_directory='{{ directory["srv"] }}'
backup_directory='{{ directory["backup"] }}'
etc_directory='{{ directory["etc"] }}'

# Status files read by the monitoring promises after the restore ends.
RESTORE_EXIT_CODE_FILE='{{ restore_exit_code_file }}'
RESTORE_ERROR_MESSAGE_FILE='{{ restore_error_message_file }}'
ERROR_MESSAGE=""

# Record a failure for the monitoring promise and abort the restore.
# Reads the ERROR_MESSAGE global (set by log_message) to report which
# step was running when the failure happened.
fail_with_exit_code () {
  # Quote the redirection targets: an unquoted target containing spaces
  # is an "ambiguous redirect" error in bash.
  echo 1 > "$RESTORE_EXIT_CODE_FILE"
  echo -e "Failure during step : $ERROR_MESSAGE" > "$RESTORE_ERROR_MESSAGE_FILE"
  exit 1
}
# Any command failing under `set -e` lands here before the script dies.
trap fail_with_exit_code ERR

# Log a progress message and remember it, so that fail_with_exit_code
# can report which step was running if a later command fails.
#   $1 - message; backslash escapes (e.g. "\n") are interpreted, as with
#        the original `echo -e`, but the message is no longer subject to
#        word-splitting and glob expansion (it was passed unquoted).
log_message () {
    ERROR_MESSAGE=$1
    printf '%b\n' "$1"
}
# Drop any stale error-message file: a successful run must not keep the
# report of an earlier failure around.
rm "$RESTORE_ERROR_MESSAGE_FILE" || true

# Wrapper around the pinned rsync binary with the standard restore options:
# -rlptgov = recurse, copy symlinks, preserve perms/times/group/owner,
# verbose; --safe-links ignores symlinks pointing outside the tree and
# --delete removes destination files absent from the backup.
# `set -x` echoes the exact command line into the log before running it.
rsync () {
  set -x
  '{{ rsync_binary }}' -rlptgov --stats --safe-links --delete "$@"
  set +x
}

log_message "Restoring WebRunner content..."
(
  # XXX: code duplication with runner-export.sh.jinja2
  path=$srv_directory/runner
  backup_path=$backup_directory/runner/
  cd "$backup_path"

  if [ -d instance ]; then
    # Concatenate the exclude file of each partition of webrunner
    # to create a global exclude file.
    # Also, ignore all buildout-managed files.
    # NOTE(review): the embedded script calls dict.itervalues(), so
    # {{ sys.executable }} must be a Python 2 interpreter — confirm.
    exclude=$({{ sys.executable }} - "$path" <<EOF
if 1:
        import glob, errno, os, sys
        sys.path[:0] = {{ repr(easy_install.buildout_and_setuptools_path) }}
        from zc.buildout.configparser import parse
        path = sys.argv[1]

        def print_relative(path_list):
            for p in path_list:
                p = p.strip()
                if p:
                    print(os.path.relpath(p, path))
        print("*.sock")
        print("*.socket")
        print("*.pid")
        print(".installed*.cfg")
        for partition in glob.glob(path + "/instance/slappart*"):
            os.chdir(partition)
            try:
                with open("srv/exporter.exclude") as f:
                    exclude = f.readlines()
            except IOError as e:
                if e.errno != errno.ENOENT:
                    raise
            else:
                print_relative(exclude)
            for installed in glob.glob(".installed*.cfg"):
                try:
                    with open(installed) as f:
                        installed = parse(f, installed)
                except IOError as e:
                    if e.errno != errno.ENOENT:
                        raise
                else:
                    for section in installed.itervalues():
                        print_relative(section.get(
                            '__buildout_installed__', '').splitlines())
EOF
)
    echo "$exclude" |rsync --exclude-from=- instance "$path"
  fi

  # Use `if` instead of `test ... && rsync ...`: when the tested path is
  # missing, the final && list would leave this subshell with a non-zero
  # exit status, and `set -e` + the ERR trap would abort the whole
  # restore even though a missing project/public/proxy.db is legal
  # (the code below explicitly handles an absent proxy.db).
  if [ -d project ]; then
    rsync project "$path"
  fi
  if [ -d public ]; then
    rsync public "$path"
  fi
  if [ -f proxy.db ]; then
    rsync proxy.db "$path"
  fi
)
# Bring back the web runner configuration saved under the backup's etc/.
log_message "Restoring WebRunner config (etc directory)..."
(
  cd "$backup_directory/etc/"
  rsync config.json "$etc_directory"
  # Hidden files are related to the webrunner's internals
  cp -r .??* "$etc_directory"
)
# Invoke arbitrary script to perform specific restoration
# procedure.
runner_import_restore=$srv_directory/runner-import-restore
if [ -x "$runner_import_restore" ]; then
  log_message "Running $runner_import_restore..."
  # Use the variable computed above instead of re-spelling the path,
  # so the test and the invocation can never drift apart.
  "$runner_import_restore"
fi
# If no "etc/.project" neither "srv/runner/proxy.db", we can safely assume
# that there is no instance deployed on runner0
# (two `[ ]` tests joined by && replace the deprecated, ambiguous `-a`).
if [ ! -f "$etc_directory/.project" ] && [ ! -f "$srv_directory/runner/proxy.db" ]; then
  log_message "No Software Requested... Writing status file... End"
  echo 0 > "$RESTORE_EXIT_CODE_FILE"
  exit 0
fi

log_message "Updating slapproxy database..."

HOME='{{ directory["home"] }}'
# XXX Hardcoded
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
export MAKEFLAGS=-j4

SLAPOS='{{ directory["bin"] }}'/slapos
# XXX hardcoded
SQLITE3="$HOME/software_release/parts/sqlite3/bin/sqlite3"
DATABASE="$HOME/srv/runner/proxy.db"

# Change slapproxy database to point instances to new software release
# XXX hardcoded
PARTITION=$(basename "$HOME")
OLD_SOFTWARE_RELEASE=$("$SQLITE3" "$DATABASE" "select software_release from partition11 where reference='slappart0';")
# Rewrite the trailing "slappartNNN" / "test0-NNN" component of the old
# release URL with the current partition name. Use printf with a quoted
# expansion — the previous unquoted `echo $OLD_SOFTWARE_RELEASE` was
# subject to word-splitting and glob expansion.
SOFTWARE_RELEASE=$(printf '%s\n' "$OLD_SOFTWARE_RELEASE" | sed -e 's/\(.*\)\(slappart\|test0-\)[0-9][0-9][0-9]\?/\1'"$PARTITION"'/')
"$SQLITE3" "$DATABASE" "update partition11 set software_release='$SOFTWARE_RELEASE' where software_release NOT NULL;"
"$SQLITE3" "$DATABASE" "update software11 set url='$SOFTWARE_RELEASE' where url='$OLD_SOFTWARE_RELEASE';" || "$SQLITE3" "$DATABASE" "delete from software11 where url='$OLD_SOFTWARE_RELEASE';"
# Change slapproxy database to have all instances stopped
"$SQLITE3" "$DATABASE" "update partition11 set requested_state='stopped';"
# Change slapproxy database to get correct IPs
IPV4='{{ ipv4 }}'
IPV6='{{ ipv6 }}'
"$SQLITE3" "$DATABASE" "update partition_network11 set address='$IPV4' where netmask='255.255.255.255';"
"$SQLITE3" "$DATABASE" "update partition_network11 set address='$IPV6' where netmask='ffff:ffff:ffff::';"

MASTERURL='http://{{ ipv4 }}:{{ proxy_port }}'
log_message "Removing old supervisord service description files..."
# XXX: Path hardcoded in slapos.core
rm '{{ instance_folder }}'/etc/supervisord.conf.d/* || true

# slapos configuration and slapgrid log locations used below.
SLAPOSCFG='{{ supervisord["slapos-cfg"] }}'
SLAPGRIDSRLOG='{{ supervisord["slapgrid-sr-log"] }}'
SLAPGRIDCPLOG='{{ supervisord["slapgrid-cp-log"] }}'
log_message "Building newest Software Release..."
# Try the build up to three times — transient fetch/build failures are
# common. If the third attempt also fails, dump the tail of the build
# log for the operator; the trailing `false` fires the ERR trap.
"$SLAPOS" node software --cfg "$SLAPOSCFG" --all --master-url="$MASTERURL" --logfile "$SLAPGRIDSRLOG" >/dev/null 2>&1 \
  || "$SLAPOS" node software --cfg "$SLAPOSCFG" --all --master-url="$MASTERURL" --logfile "$SLAPGRIDSRLOG" >/dev/null 2>&1 \
  || "$SLAPOS" node software --cfg "$SLAPOSCFG" --all --master-url="$MASTERURL" --logfile "$SLAPGRIDSRLOG" >/dev/null 2>&1 \
  || (tail -n 200 "$SLAPGRIDSRLOG" && false)
# Remove defined scripts to force buildout to recreate them to have updated paths
rm "$srv_directory"/runner/instance/slappart*/srv/runner-import-restore || true

log_message "Fixing Instances as needed after import..."
# XXX hardcoded
# Up to three attempts; on the last failure show the end of the log and
# trip the ERR trap via `false`. Quote $MASTERURL as the software-build
# invocation does — the unquoted form word-splits on unusual values.
"$SLAPOS" node instance --cfg "$SLAPOSCFG" --master-url="$MASTERURL" --logfile "$SLAPGRIDCPLOG" >/dev/null 2>&1 ||
"$SLAPOS" node instance --cfg "$SLAPOSCFG" --master-url="$MASTERURL" --logfile "$SLAPGRIDCPLOG" >/dev/null 2>&1 ||
"$SLAPOS" node instance --cfg "$SLAPOSCFG" --master-url="$MASTERURL" --logfile "$SLAPGRIDCPLOG" >/dev/null 2>&1 ||
(tail -n 200 "$SLAPGRIDCPLOG" && false)
# Invoke defined scripts for each partition inside of slaprunner
log_message "Invoke custom import scripts defined by each instances..."
for partition in "$srv_directory"/runner/instance/slappart*/
do
  script="$partition/srv/runner-import-restore"
  # Skip partitions that do not ship an executable restore hook.
  [ -x "$script" ] || continue
  log_message "Running custom instance script : $script..."
  "$script"
done
# Change back slapproxy database to have all instances started
log_message "Set instances as to start after takeover..."
"$SQLITE3" "$DATABASE" "update partition11 set requested_state='started';"

# Write exit code to an arbitrary file that will be checked by promise/monitor
# (quote the redirection target; unquoted it breaks on paths with spaces).
log_message "Writing status file... End"
echo 0 > "$RESTORE_EXIT_CODE_FILE"
exit 0