Commit cfbed313 by Nicolas Wavrant

Merge remote-tracking branch 'origin/master'

Conflicts:
	slapos/runner/gittools.py
parents 7c98e0a2 022fcb2d
0.37.4 (2013-10-15)
-------------------
* Improve QEMU QMP wrapper by adding drive-backup method and other helpers. [0afb7d6, 95d0c8b]
0.37.3 (2013-10-10)
-------------------
* pubsub: don't swallow output of subprocess to allow debug. [c503484]
0.37.2 (2013-10-10)
-------------------
* Add QEMU QMP wrapper. [9e819a8]
* KVM resiliency test: update docstring about how to setup disk image. [dbe347f]
* KVM resiliency test: change key for each clone. [7ef1db3]
0.37.1 (2013-10-03)
-------------------
* pubsub notifier: handle timeout and other connection errors. [ac4c75c]
* equeue: cast str(timestamp) to please gdbm. [8b067d6]
0.37 (2013-09-30)
=================
* equeue: log output of subprocess. [1694937]
* slaprunner: don't send 200 when login is bad. [4a8e10bf]
* Improve reliability of resiliency tests.
0.36 (2013-09-05)
=================
......
......@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
import glob
import os
version = '0.36.1-dev'
version = '0.37.4'
name = 'slapos.toolbox'
long_description = open("README.txt").read() + "\n" + \
open("CHANGES.txt").read() + "\n"
......@@ -70,6 +70,7 @@ setup(name=name,
'onetimeupload = slapos.onetimeupload:main',
'pubsubnotifier = slapos.pubsub.notifier:main',
'pubsubserver = slapos.pubsub:main',
'qemu-qmp-client = slapos.qemuqmpclient:main',
'shacache = slapos.shacache:main',
'slapbuilder = slapos.builder:main',
'slapcontainer = slapos.container:main',
......
......@@ -72,6 +72,26 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
def setDB(self, database):
self.db = gdbm.open(database, 'cs', 0700)
def _runCommandIfNeeded(self, command, timestamp):
with self.lock:
if command in self.db and timestamp <= int(self.db[command]):
self.logger.info("%s already run.", command)
return
self.logger.info("Running %s, %s with output:", command, timestamp)
try:
self.logger.info(
subprocess.check_output([command], stderr=subprocess.STDOUT)
)
self.logger.info("%s finished successfully.", command)
except subprocess.CalledProcessError as e:
self.logger.warning("%s exited with status %s. output is: \n %s" % (
command,
e.returncode,
e.output,
))
self.db[command] = str(timestamp)
def process_request_thread(self, request, client_address):
# Handle request
self.logger.debug("Connection with file descriptor %d", request.fileno())
......@@ -102,23 +122,7 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
except:
self.logger.warning("Couldn't respond to %r", request.fileno())
self.close_request(request)
# Run command if needed
with self.lock:
if command not in self.db or timestamp > int(self.db[command]):
self.logger.info("Running %s, %s", command, timestamp)
# XXX stdout and stderr redirected to null as they are not read
with open(os.devnull, 'r+') as f:
status = subprocess.call([command], close_fds=True,
stdin=f, stdout=f, stderr=f)
if status:
self.logger.warning("%s finished with non zero status.",
command)
else:
self.logger.info("%s finished successfully.", command)
self.db[command] = timestamp
else:
self.logger.info("%s already runned.", command)
self._runCommandIfNeeded(command, timestamp)
# Well, the following function is made for Schrödinger's files:
# it will work whether the file exists or not
def remove_existing_file(path):
......
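For reference, the deduplication that moved into _runCommandIfNeeded above boils down to the sketch below; it also shows why 0.37.1 casts the timestamp with str() before storing it, since gdbm only accepts string keys and values. The database path, command and timestamp are made up.

import gdbm

db = gdbm.open('/tmp/equeue-example.db', 'cs', 0700)
command, timestamp = '/srv/backup/runner', 1381400000

if command in db and timestamp <= int(db[command]):
    print '%s already run.' % command
else:
    # ... run the command and log its output here ...
    db[command] = str(timestamp)  # gdbm refuses non-string values, hence str()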
......@@ -4,11 +4,11 @@
import argparse
import csv
import httplib
import os
import socket
import subprocess
import sys
import time
import traceback
import urllib2
import urlparse
import uuid
......@@ -31,28 +31,23 @@ def main():
args = parser.parse_args()
with open(os.devnull) as devnull:
command = subprocess.Popen(args.executable[0],
stdin=subprocess.PIPE,
stdout=devnull,
stderr=subprocess.PIPE,
close_fds=True)
command.stdin.flush()
command.stdin.close()
command_failed = (command.wait() != 0)
command_stderr = command.stderr.read()
if command_failed:
content = ("<p>Failed with returncode <em>%d</em>.</p>"
"<p>Standard error output is :</p><pre>%s</pre>") % (
command.poll(),
command_stderr.replace('&', '&amp;')\
.replace('<', '&lt;')\
.replace('>', '&gt;'),
)
else:
content = "<p>Everything went well.</p>"
try:
content = subprocess.check_output(
args.executable[0],
stderr=subprocess.STDOUT
)
exit_code = 0
except subprocess.CalledProcessError as e:
content = e.output
exit_code = e.returncode
print content
content += ("\n<p>Failed with returncode <em>%d</em>.</p>"
"<p>Output is: </p><pre>%s</pre>" % (
exit_code,
content.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
))
with open(args.logfile[0], 'a') as file_:
cvsfile = csv.writer(file_)
......@@ -63,9 +58,8 @@ def main():
'slapos:%s' % uuid.uuid4(),
])
if command_failed:
sys.stderr.write('%s\n' % command_stderr)
sys.exit(1)
if exit_code != 0:
sys.exit(exit_code)
print 'Fetching %s feed...' % args.feed_url[0]
......@@ -80,14 +74,18 @@ def main():
notification_port = socket.getservbyname(notification_url.scheme)
headers = {'Content-Type': feed.info().getheader('Content-Type')}
notification = httplib.HTTPConnection(notification_url.hostname,
notification_port)
notification.request('POST', notification_url.path, body, headers)
response = notification.getresponse()
if not (200 <= response.status < 300):
sys.stderr.write("The remote server at %s didn't send a successful reponse.\n" % notif_url)
sys.stderr.write("Its response was %r\n" % response.reason)
try:
notification = httplib.HTTPConnection(notification_url.hostname,
notification_port)
notification.request('POST', notification_url.path, body, headers)
response = notification.getresponse()
if not (200 <= response.status < 300):
sys.stderr.write("The remote server at %s didn't send a successful reponse.\n" % notif_url)
sys.stderr.write("Its response was %r\n" % response.reason)
some_notification_failed = True
except socket.error as exc:
sys.stderr.write("Connection with remote server at %s failed:\n" % notif_url)
sys.stderr.write(traceback.format_exc(exc))
some_notification_failed = True
if some_notification_failed:
......
##############################################################################
#
# Copyright (c) 2013 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import argparse
import json
import os
import pprint
import socket
import time
def parseArgument():
"""
Very basic argument parser. Might blow up for anything other than
"./executable mysocket.sock stop/resume".
"""
parser = argparse.ArgumentParser()
parser.add_argument('--suspend', action='store_const', dest='action', const='suspend')
parser.add_argument('--resume', action='store_const', dest='action', const='resume')
parser.add_argument('--create-snapshot', action='store_const', dest='action', const='createSnapshot')
parser.add_argument('--create-internal-snapshot', action='store_const', dest='action', const='createInternalSnapshot')
parser.add_argument('--delete-internal-snapshot', action='store_const', dest='action', const='deleteInternalSnapshot')
parser.add_argument('--drive-backup', action='store_const', dest='action', const='driveBackup')
parser.add_argument('--query-commands', action='store_const', dest='action', const='queryCommands')
parser.add_argument('--socket', dest='unix_socket_location', required=True)
parser.add_argument('remainding_argument_list', nargs=argparse.REMAINDER)
args = parser.parse_args()
return args.unix_socket_location, args.action, args.remainding_argument_list
class QemuQMPWrapper(object):
"""
Small wrapper around Qemu's QMP to control a qemu VM.
See http://git.qemu.org/?p=qemu.git;a=blob;f=qmp-commands.hx for
QMP API definition.
"""
def __init__(self, unix_socket_location):
self.socket = self.connectToQemu(unix_socket_location)
self.capabilities()
@staticmethod
def connectToQemu(unix_socket_location):
"""
Create a socket, connect to qemu, be sure it answers, return socket.
"""
if not os.path.exists(unix_socket_location):
raise Exception('unix socket %s does not exist.' % unix_socket_location)
print 'Connecting to qemu...'
so = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
connected = False
while not connected:
try:
so.connect(unix_socket_location)
except socket.error:
time.sleep(1)
print 'Could not connect, retrying...'
else:
connected = True
so.recv(1024)
return so
def _send(self, message):
self.socket.send(json.dumps(message))
data = self.socket.recv(65535)
try:
return json.loads(data)
except ValueError:
print 'Wrong data: %s' % data
def _getVMStatus(self):
response = self._send({'execute': 'query-status'})
if response:
return self._send({'execute': 'query-status'})['return']['status']
else:
raise IOError('Empty answer')
def _waitForVMStatus(self, wanted_status):
while True:
try:
actual_status = self._getVMStatus()
if actual_status == wanted_status:
return
else:
print 'VM in %s status, wanting it to be %s, retrying...' % (
actual_status, wanted_status)
time.sleep(1)
except IOError:
print 'VM not ready, retrying...'
def capabilities(self):
print 'Asking for capabilities...'
self._send({'execute': 'qmp_capabilities'})
def suspend(self):
print 'Suspending VM...'
self._send({'execute': 'stop'})
self._waitForVMStatus('paused')
def resume(self):
print 'Resuming VM...'
self._send({'execute': 'cont'})
self._waitForVMStatus('running')
def _queryBlockJobs(self, device):
return self._send({'execute': 'query-block-jobs'})
def _getRunningJobList(self, device):
result = self._queryBlockJobs(device)
if result.get('return'):
return result['return']
else:
return
def driveBackup(self, backup_target, source_device='virtio0', sync_type='full'):
print 'Asking Qemu to perform backup to %s' % backup_target
# XXX: check for error
self._send({
'execute': 'drive-backup',
'arguments': {
'device': source_device,
'sync': sync_type,
'target': backup_target,
}
})
while self._getRunningJobList(backup_target):
print 'Job is not finished yet.'
time.sleep(20)
def createSnapshot(self, snapshot_file, device='virtio0'):
print self._send({
'execute': 'blockdev-snapshot-sync',
'arguments': {
'device': device,
'snapshot-file': snapshot_file,
}
})
def createInternalSnapshot(self, name=None, device='virtio0'):
if name is None:
name = int(time.time())
self._send({
'execute': 'blockdev-snapshot-internal-sync',
'arguments': {
'device': device,
'name': name,
}
})
def deleteInternalSnapshot(self, name, device='virtio0'):
self._send({
'execute': 'blockdev-snapshot-delete-internal-sync',
'arguments': {
'device': device,
'name': name,
}
})
def queryCommands(self):
pprint.pprint(self._send({'execute': 'query-commands'})['return'])
def main():
unix_socket_location, action, remainding_argument_list = parseArgument()
qemu_wrapper = QemuQMPWrapper(unix_socket_location)
if remainding_argument_list:
getattr(qemu_wrapper, action)(*remainding_argument_list)
else:
getattr(qemu_wrapper, action)()
if __name__ == '__main__':
main()
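For illustration, a minimal sketch of driving the wrapper from Python instead of going through the qemu-qmp-client console script registered in setup.py; the socket path and backup target below are hypothetical.

from slapos.qemuqmpclient import QemuQMPWrapper

# Connect to the VM's QMP UNIX socket (hypothetical path).
qemu = QemuQMPWrapper('/srv/slapgrid/slappart0/var/qmp_socket')
qemu.suspend()                         # QMP 'stop', waits for the 'paused' status
qemu.driveBackup('/tmp/backup.qcow2')  # polls query-block-jobs until the job disappears
qemu.resume()                          # QMP 'cont', waits for the 'running' status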
......@@ -15,3 +15,32 @@ This module contains:
* A Resiliency Test Suite framework (in suites/), used to easily write new
test suites (a minimal subclass sketch follows this list)
* A list of test suites
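To give an idea of what writing a new suite involves, here is a hypothetical minimal subclass. It assumes the framework calls the three methods visible elsewhere in this commit (generateData, pushDataOnMainInstance, checkDataOnCloneInstance); the import path is only indicative.

from slapos.resiliencytest.suites.resiliencytestsuite import ResiliencyTestSuite  # indicative path

class DummyTestSuite(ResiliencyTestSuite):
    def generateData(self):
        # Produce the data that will later be pushed to the main instance.
        self.key = 'some random payload'

    def pushDataOnMainInstance(self):
        # Store self.key on the main instance (service-specific).
        pass

    def checkDataOnCloneInstance(self):
        # Return True if the promoted clone still serves self.key.
        return True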
TODO:
* Check that each partition is in different slapos node.
* Test for bang calls
* Be able to configure from ERP5 Master (i.e. from instance parameters): count of PBS/clones, then test several possibilities (the so-called "count" in the test suite)
* Use Nexedi ERP5, when in production.
* Put the KVM disk image in a safe place.
------------
For reference: How-to deploy the whole test system
1/ Deploy a SlapOS Master
2/ Deploy an ERP5, install the erp5_test_result BT with the scalability feature (currently in the scalability-master2 branch of erp5.git) (currently, a few lines had to be changed in the scalability extension of the portal_class; this should be committed)
3/ Configure 3 nodes in the new SlapOS Master, deploy in each a testnode with scalability feature (erp5testnode-scalability branch of slapos.git) with parameters like:
<?xml version="1.0" encoding="utf-8"?>
<instance>
<parameter id="test-node-title">COMP-0-Testnode</parameter>
<parameter id="test-suite-master-url">https://zope:insecure@softinst43496.host.vifib.net/erp5/portal_task_distribution/1</parameter>
</instance>
3bis/ Supply and request http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.92:/software/kvm/software.cfg on a public node (so that vnc frontends are ok). "domain" parameter should be [ipv6] of partition. ipv4:4443 should be 6tunnelled to ipv6:4443 (Note: here, instead, I just hacked kvm_frontend to listen on ipv6).
3ter/ Supply and request http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg, with any "domain" (it won't be used), on a public node (so that web frontends are ok)
4/ On the ERP5 instance, create a project, a Task Distribution (in portal_task_distribution, type Scalability Task Distribution)
5/ On the ERP5 instance, create a Test Suite, validate it
Note: the slapos nodes are currently deployed using slapos-in-partition.
Note: you have to manually kill -10 the erp5testnode process to start deployment of the test, because it doesn't know when the SR installation is finished.
Note: you have to manually run slapos-node-software --all on the slapos nodes if you are developing the SR you are testing.
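For steps 3bis/3ter, supplying and requesting a software release on a public node can also be scripted with the slapos.slap API; a rough sketch, in which the master URL, certificates, node id and parameters are placeholders:

import slapos.slap

slap = slapos.slap.slap()
slap.initializeConnection('https://slapos.example.org/', 'key.pem', 'cert.pem')  # placeholder master

kvm_sr = 'http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.92:/software/kvm/software.cfg'
slap.registerSupply().supply(kvm_sr, computer_guid='COMP-PUBLIC-NODE')  # placeholder node id
slap.registerOpenOrder().request(
    software_release=kvm_sr,
    partition_reference='kvm-frontend',
    partition_parameter_kw={'domain': '[2001:db8::1]'},  # placeholder: ipv6 of the partition
)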
......@@ -37,10 +37,6 @@ import traceback
from erp5.util import taskdistribution
from erp5.util.testnode import Utils
MAX_INSTALLATION_TIME = 60 * 50
MAX_TESTING_TIME = 60
MAX_GETTING_CONNECTION_TIME = 60 * 5
def importFrom(name):
"""
Import a test suite module (in the suites module) and return it.
......@@ -147,7 +143,7 @@ class ScalabilityLauncher(object):
Return a ScalabilityTest with the information of the currently running test case,
or None if no test case is ready
"""
data = self.test_result.getNextTestCase()
data = self.test_result.getRunningTestCase()
if data == None:
return None
decoded_data = Utils.deunicodeData(json.loads(
......
......@@ -42,6 +42,10 @@ import urllib
logger = logging.getLogger('KVMResiliencyTest')
# Wait for 2 hours before renaming, so that replication of data is done
# (~1GB of data to backup)
SLEEP_TIME = 2 * 60 * 60
def fetchMainInstanceIP(current_partition, software_release, instance_name):
return current_partition.request(
software_release=software_release,
......@@ -105,7 +109,9 @@ def runTestSuite(server_url, key_file, cert_file,
3/ Resilience is done, wait XX seconds
4/ For each clone: do a takeover. Check that the IPv6 of the new main instance is different. Check, by doing an HTTP request to the new VM that fetches the stored random number, that the returned number is the same as the one sent.
Note: disk image is a simple debian with the following python code running at boot:
Note: disk image is a simple debian with gunicorn and flask installed:
apt-get install python-setuptools; easy_install gunicorn flask
With the following python code running at boot in /root/number.py:
import os
......@@ -115,7 +121,7 @@ def runTestSuite(server_url, key_file, cert_file,
storage = 'storage.txt'
@app.route("/")
def greeting_list(): # 'cause they are several greetings, and plural is forbidden.
def greeting_list(): # 'cause there are several greetings, and plural is forbidden.
return "Hello World"
@app.route("/get")
......@@ -124,13 +130,39 @@ def runTestSuite(server_url, key_file, cert_file,
@app.route("/set")
def set():
if os.path.exists(storage):
abort(503)
#if os.path.exists(storage):
# abort(503)
open(storage, 'w').write(request.args['key'])
return "OK"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
Then create the boot script:
echo "cd /root; /usr/local/bin/gunicorn number:app -b 0.0.0.0:80 -D --error-logfile /root/error_log --access-logfile /root/access_log" > /etc/init.d/gunicorn-number
chmod +x /etc/init.d/gunicorn-number
update-rc.d gunicorn-number defaults
There also is a script that randomly generates I/O in /root/io.sh:
#!/bin/sh
# Randomly generates high I/O on disk. Goal is to write on disk so that
# it flushes at the same time that snapshot of disk image is done, to check if
# it doesn't corrupt image.
# Ayayo!
while [ 1 ]; do
dd if=/dev/urandom of=random count=2k
sync
sleep 0.2
done
Then create the boot script:
echo "/bin/sh /root/io.sh &" > /etc/init.d/io
chmod +x /etc/init.d/io
update-rc.d io defaults
"""
slap = slapos.slap.slap()
slap.initializeConnection(server_url, key_file, cert_file)
......@@ -142,9 +174,6 @@ def runTestSuite(server_url, key_file, cert_file,
ip = fetchMainInstanceIP(partition, software, kvm_rootinstance_name)
logger.info('KVM IP is %s.' % ip)
key = setRandomKey(ip)
logger.info('Key set for test in current KVM: %s.' % key)
# In resilient stack, main instance (example with KVM) is named "kvm0",
# clones are named "kvm1", "kvm2", ...
clone_count = int(total_instance_count) - 1
......@@ -154,23 +183,34 @@ def runTestSuite(server_url, key_file, cert_file,
# Test each clone
while current_clone <= clone_count:
logger.info('Testing kvm%s.' % current_clone)
# Wait for XX minutes so that replication is done
sleep_time = 60 * 15#2 * 60 * 60
logger.info('Sleeping for %s seconds.' % sleep_time)
time.sleep(sleep_time)
key = setRandomKey(ip)
logger.info('Key set for test in current KVM: %s.' % key)
logger.info('Sleeping for %s seconds.' % SLEEP_TIME)
time.sleep(SLEEP_TIME)
# Make the clone instance takeover the main instance
logger.info('Replacing main instance by clone instance...')
takeover(
server_url=server_url,
key_file=key_file,
cert_file=cert_file,
computer_guid=computer_id,
partition_id=partition_id,
software_release=software,
namebase=namebase,
winner_instance_suffix=str(current_clone),
)
for i in range(0, 10):
try:
takeover(
server_url=server_url,
key_file=key_file,
cert_file=cert_file,
computer_guid=computer_id,
partition_id=partition_id,
software_release=software,
namebase=namebase,
winner_instance_suffix=str(current_clone),
)
break
except: # SSLError
traceback.print_exc()
if i is 9:
raise
logger.warning('takeover failed. Retrying...')
time.sleep(10)
logger.info('Done.')
# Wait for the new IP (of old-clone new-main instance) to appear.
......
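The set/check round-trip against the /set and /get flask routes shown in the docstring above amounts to something like the sketch below (setRandomKey itself is defined elsewhere in this module and may differ); the bracketed IPv6 URL and the error message are illustrative only.

import random
import urllib

def set_and_verify_key(ip):
    # The VM is reached over IPv6 per the test description, hence the brackets.
    key = str(random.randint(0, 999999999))
    base = 'http://[%s]' % ip
    # Store the key on the VM, then read it back to make sure it was written.
    urllib.urlopen('%s/set?key=%s' % (base, key)).read()
    fetched = urllib.urlopen('%s/get' % base).read().strip()
    if fetched != key:
        raise ValueError('VM returned %r instead of the key %r that was set.' % (fetched, key))
    return key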
......@@ -42,6 +42,7 @@ class ResiliencyTestSuite(object):
computer_id, partition_id, software,
namebase,
root_instance_name,
sleep_time_between_test=600,
total_instance_count="3"):
self.server_url = server_url
self.key_file = key_file
......@@ -52,6 +53,7 @@ class ResiliencyTestSuite(object):
self.namebase = namebase
self.total_instance_count = total_instance_count
self.root_instance_name = root_instance_name
self.sleep_time_between_test = sleep_time_between_test
slap = slapos.slap.slap()
slap.initializeConnection(server_url, key_file, cert_file)
......@@ -151,12 +153,12 @@ class ResiliencyTestSuite(object):
# Test each clone
while current_clone <= clone_count:
# Wait for XX minutes so that replication is done
sleep_time = 60 * 15#2 * 60 * 60
self.logger.info('Sleeping for %s seconds before testing clone %s.' % (
sleep_time,
self.sleep_time_between_test,
current_clone
))
time.sleep(sleep_time)
time.sleep(self.sleep_time_between_test)
self._doTakeover(current_clone)
self.logger.info('Testing %s%s instance.' % (self.namebase, current_clone))
success = self.checkDataOnCloneInstance()
......
......@@ -50,22 +50,19 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
# Setup urllib2 with cookie support
cookie_jar = cookielib.CookieJar()
self._opener_director = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
self._opener_director = urllib2.build_opener(
urllib2.HTTPCookieProcessor(cookie_jar)
)
ResiliencyTestSuite.__init__(
self,
server_url, key_file, cert_file,
computer_id, partition_id, software,
namebase,
slaprunner_rootinstance_name
slaprunner_rootinstance_name,
300
)
def generateData(self):
self.slaprunner_password = ''.join(random.SystemRandom().sample(string.ascii_lowercase, 8))
self.slaprunner_user = 'slapos'
self.logger.info('Generated slaprunner user is: %s' % self.slaprunner_user)
self.logger.info('Generated slaprunner password is: %s' % self.slaprunner_password)
def _connectToSlaprunner(self, resource, data=None):
"""
Utility.
......@@ -84,23 +81,92 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
def _login(self):
self.logger.debug('Logging in...')
self._connectToSlaprunner('doLogin', data='clogin=%s&cpwd=%s' % (self.slaprunner_user, self.slaprunner_password))
self._connectToSlaprunner('doLogin', data='clogin=%s&cpwd=%s' % (
self.slaprunner_user,
self.slaprunner_password)
)
def _retrieveInstanceLogFile(self):
"""
Store the logfile (=data) of the instance, check it is not empty nor it is html.
Store the logfile (=data) of the instance; check that it is neither empty
nor HTML.
"""
data = self._connectToSlaprunner(resource='fileBrowser', data='opt=9&filename=log.log&dir=instance_root%252Fslappart0%252Fvar%252Flog%252F')
data = self._connectToSlaprunner(
resource='fileBrowser',
data='opt=9&filename=log.log&dir=instance_root%252Fslappart0%252Fvar%252Flog%252F'
)
self.logger.info('Retrieved data are:\n%s' % data)
if data.find('<') is not -1:
raise IOError('Could not retrieve logfile content: retrieved content is html.')
raise IOError(
'Could not retrieve logfile content: retrieved content is html.'
)
if data.find('Could not load') is not -1:
raise IOError('Could not retrieve logfile content: server could not load the file.')
raise IOError(
'Could not retrieve logfile content: server could not load the file.'
)
if data.find('Hello') is -1:
raise IOError('Could not retrieve logfile content: retrieve content does not match "Hello".')
raise IOError(
'Could not retrieve logfile content: retrieved content does not match "Hello".'
)
return data
def _waitForSoftwareBuild(self):
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"software": true') is not -1:
self.logger.info('Software release is still building. Sleeping...')
time.sleep(15)
self.logger.info('Software Release has been built / is no longer building.')
def _buildSoftwareRelease(self):
self.logger.info('Building the Software Release...')
try:
self._connectToSlaprunner(resource='runSoftwareProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might time out before the software release is finished.
pass
self._waitForSoftwareBuild()
def _deployInstance(self):
self.logger.info('Deploying instance...')
try:
self._connectToSlaprunner(resource='runInstanceProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might time out before the software release is finished.
pass
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"instance": true') is not -1:
self.logger.info('Buildout is still running. Sleeping...')
time.sleep(15)
self.logger.info('Instance has been deployed.')
def _gitClone(self):
self.logger.debug('Doing git clone of git.erp5.org/repos/slapos.git...')
try:
self._connectToSlaprunner(
resource='cloneRepository',
data='repo=http://git.erp5.org/repos/slapos.git&name=workspace/slapos&email=slapos@slapos.org&user=slapos'
)
except (NotHttpOkException, urllib2.HTTPError):
# cloning can be very long.
# XXX: quite dirty way to check.
while self._connectToSlaprunner('getProjectStatus', data='project=workspace/slapos').find('On branch master') is -1:
self.logger.info('git-cloning ongoing, sleeping...')
def _openSoftwareRelease(self, software_name):
self.logger.debug('Opening %s software release...' % software_name)
self._connectToSlaprunner(
resource='setCurrentProject',
data='path=workspace/slapos/software/%s/' % software_name
)
def generateData(self):
self.slaprunner_password = ''.join(
random.SystemRandom().sample(string.ascii_lowercase, 8)
)
self.slaprunner_user = 'slapos'
self.logger.info('Generated slaprunner user is: %s' % self.slaprunner_user)
self.logger.info('Generated slaprunner password is: %s' % self.slaprunner_password)
def pushDataOnMainInstance(self):
"""
Create a dummy Software Release,
......@@ -117,47 +183,26 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
slaprunner_recovery_code = parameter_dict['password_recovery_code']