Commit cfbed313 authored by Nicolas Wavrant

Merge remote-tracking branch 'origin/master'

Conflicts:
	slapos/runner/gittools.py
parents 7c98e0a2 022fcb2d
0.37.4 (2013-10-15)
-------------------
* Improve QEMU QMP wrapper by adding drive-backup method and other helpers. [0afb7d6, 95d0c8b]
0.37.3 (2013-10-10)
-------------------
* pubsub: don't swallow output of subprocess to allow debug. [c503484]
0.37.2 (2013-10-10)
-------------------
* Add QEMU QMP wrapper. [9e819a8]
* KVM resiliency test: update docstring about how to setup disk image. [dbe347f]
* KVM resiliency test: change key for each clone. [7ef1db3]
0.37.1 (2013-10-03)
-------------------
* pubsub notifier: handle timeout and other connection errors. [ac4c75c]
* equeue: cast str(timestamp) to please gdbm. [8b067d6]
0.37 (2013-09-30)
=================
* equeue: log output of subprocess. [1694937]
* slaprunner: don't send 200 when login is bad. [4a8e10bf]
* Improve reliability of resiliency tests.
0.36 (2013-09-05)
=================
......
......@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
import glob
import os
version = '0.36.1-dev'
version = '0.37.4'
name = 'slapos.toolbox'
long_description = open("README.txt").read() + "\n" + \
open("CHANGES.txt").read() + "\n"
......@@ -70,6 +70,7 @@ setup(name=name,
'onetimeupload = slapos.onetimeupload:main',
'pubsubnotifier = slapos.pubsub.notifier:main',
'pubsubserver = slapos.pubsub:main',
'qemu-qmp-client = slapos.qemuqmpclient:main',
'shacache = slapos.shacache:main',
'slapbuilder = slapos.builder:main',
'slapcontainer = slapos.container:main',
......
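A hedged aside: the console_scripts entry added above makes setuptools generate a qemu-qmp-client executable resolving to slapos.qemuqmpclient:main. A quick sanity check of that wiring (a sketch; assumes the slapos.toolbox package is installed):
# Resolve the new entry point the way the generated script does.
from pkg_resources import load_entry_point
main = load_entry_point('slapos.toolbox', 'console_scripts', 'qemu-qmp-client')
# Calling main() would parse --socket plus an action flag and drive QMP.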
......@@ -72,6 +72,26 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
def setDB(self, database):
self.db = gdbm.open(database, 'cs', 0700)
def _runCommandIfNeeded(self, command, timestamp):
with self.lock:
if command in self.db and timestamp <= int(self.db[command]):
self.logger.info("%s already run.", command)
return
self.logger.info("Running %s, %s with output:", command, timestamp)
try:
self.logger.info(
subprocess.check_output([command], stderr=subprocess.STDOUT)
)
self.logger.info("%s finished successfully.", command)
except subprocess.CalledProcessError as e:
self.logger.warning("%s exited with status %s. output is: \n %s" % (
command,
e.returncode,
e.output,
))
self.db[command] = str(timestamp)
def process_request_thread(self, request, client_address):
# Handle request
self.logger.debug("Connection with file descriptor %d", request.fileno())
......@@ -102,23 +122,7 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
except:
self.logger.warning("Couldn't respond to %r", request.fileno())
self.close_request(request)
# Run command if needed
with self.lock:
if command not in self.db or timestamp > int(self.db[command]):
self.logger.info("Running %s, %s", command, timestamp)
# XXX stdout and stderr redirected to null as they are not read
with open(os.devnull, 'r+') as f:
status = subprocess.call([command], close_fds=True,
stdin=f, stdout=f, stderr=f)
if status:
self.logger.warning("%s finished with non zero status.",
command)
else:
self.logger.info("%s finished successfully.", command)
self.db[command] = timestamp
else:
self.logger.info("%s already runned.", command)
self._runCommandIfNeeded(command, timestamp)
# Well, the following function is made for Schrödinger's files:
# it will work whether the file exists or not.
def remove_existing_file(path):
......
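The _runCommandIfNeeded method above keys a gdbm database by command name and stores the timestamp of the last run as a string (gdbm only accepts strings, hence the str(timestamp) cast, cf. 0.37.1). A minimal standalone sketch of the guard, with hypothetical command and timestamps:
# Dedup rule: a command replayed with an older-or-equal timestamp is
# skipped; a newer one runs.
db = {'/srv/runner/backup.sh': '1381500000'}  # command -> last run timestamp

def should_run(command, timestamp):
    return command not in db or timestamp > int(db[command])

assert not should_run('/srv/runner/backup.sh', 1381500000)  # already run
assert should_run('/srv/runner/backup.sh', 1381500060)      # newer request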
......@@ -4,11 +4,11 @@
import argparse
import csv
import httplib
import os
import socket
import subprocess
import sys
import time
import traceback
import urllib2
import urlparse
import uuid
......@@ -31,28 +31,23 @@ def main():
args = parser.parse_args()
with open(os.devnull) as devnull:
command = subprocess.Popen(args.executable[0],
stdin=subprocess.PIPE,
stdout=devnull,
stderr=subprocess.PIPE,
close_fds=True)
command.stdin.flush()
command.stdin.close()
command_failed = (command.wait() != 0)
command_stderr = command.stderr.read()
if command_failed:
content = ("<p>Failed with returncode <em>%d</em>.</p>"
"<p>Standard error output is :</p><pre>%s</pre>") % (
command.poll(),
command_stderr.replace('&', '&amp;')\
.replace('<', '&lt;')\
.replace('>', '&gt;'),
)
else:
content = "<p>Everything went well.</p>"
try:
content = subprocess.check_output(
args.executable[0],
stderr=subprocess.STDOUT
)
exit_code = 0
except subprocess.CalledProcessError as e:
content = e.output
exit_code = e.returncode
print content
content += ("\n<p>Failed with returncode <em>%d</em>.</p>"
"<p>Output is: </p><pre>%s</pre>" % (
exit_code,
content.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
))
with open(args.logfile[0], 'a') as file_:
cvsfile = csv.writer(file_)
......@@ -63,9 +58,8 @@ def main():
'slapos:%s' % uuid.uuid4(),
])
if command_failed:
sys.stderr.write('%s\n' % command_stderr)
sys.exit(1)
if exit_code != 0:
sys.exit(exit_code)
print 'Fetching %s feed...' % args.feed_url[0]
......@@ -80,14 +74,18 @@ def main():
notification_port = socket.getservbyname(notification_url.scheme)
headers = {'Content-Type': feed.info().getheader('Content-Type')}
notification = httplib.HTTPConnection(notification_url.hostname,
notification_port)
notification.request('POST', notification_url.path, body, headers)
response = notification.getresponse()
if not (200 <= response.status < 300):
sys.stderr.write("The remote server at %s didn't send a successful reponse.\n" % notif_url)
sys.stderr.write("Its response was %r\n" % response.reason)
try:
notification = httplib.HTTPConnection(notification_url.hostname,
notification_port)
notification.request('POST', notification_url.path, body, headers)
response = notification.getresponse()
if not (200 <= response.status < 300):
sys.stderr.write("The remote server at %s didn't send a successful reponse.\n" % notif_url)
sys.stderr.write("Its response was %r\n" % response.reason)
some_notification_failed = True
except socket.error as exc:
sys.stderr.write("Connection with remote server at %s failed:\n" % notif_url)
sys.stderr.write(traceback.format_exc(exc))
some_notification_failed = True
if some_notification_failed:
......
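A hedged side note on the escaping above: the manual .replace() chain covers &, < and >, which is exactly what the Python 2 stdlib helper cgi.escape handles, so an equivalent formulation would be:
import cgi
# cgi.escape replaces &, < and > (in that order) by default.
escaped_content = cgi.escape(content)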
##############################################################################
#
# Copyright (c) 2013 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import argparse
import json
import os
import pprint
import socket
import time
def parseArgument():
"""
Very basic argument parser. Might blow up for anything other than
"./executable --socket mysocket.sock --suspend" (or another action flag).
"""
parser = argparse.ArgumentParser()
parser.add_argument('--suspend', action='store_const', dest='action', const='suspend')
parser.add_argument('--resume', action='store_const', dest='action', const='resume')
parser.add_argument('--create-snapshot', action='store_const', dest='action', const='createSnapshot')
parser.add_argument('--create-internal-snapshot', action='store_const', dest='action', const='createInternalSnapshot')
parser.add_argument('--delete-internal-snapshot', action='store_const', dest='action', const='deleteInternalSnapshot')
parser.add_argument('--drive-backup', action='store_const', dest='action', const='driveBackup')
parser.add_argument('--query-commands', action='store_const', dest='action', const='queryCommands')
parser.add_argument('--socket', dest='unix_socket_location', required=True)
parser.add_argument('remainding_argument_list', nargs=argparse.REMAINDER)
args = parser.parse_args()
return args.unix_socket_location, args.action, args.remainding_argument_list
class QemuQMPWrapper(object):
"""
Small wrapper around Qemu's QMP to control a qemu VM.
See http://git.qemu.org/?p=qemu.git;a=blob;f=qmp-commands.hx for
QMP API definition.
"""
def __init__(self, unix_socket_location):
self.socket = self.connectToQemu(unix_socket_location)
self.capabilities()
@staticmethod
def connectToQemu(unix_socket_location):
"""
Create a socket, connect to qemu, be sure it answers, return socket.
"""
if not os.path.exists(unix_socket_location):
raise Exception('unix socket %s does not exist.' % unix_socket_location)
print 'Connecting to qemu...'
so = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
connected = False
while not connected:
try:
so.connect(unix_socket_location)
except socket.error:
time.sleep(1)
print 'Could not connect, retrying...'
else:
connected = True
so.recv(1024)
return so
def _send(self, message):
self.socket.send(json.dumps(message))
data = self.socket.recv(65535)
try:
return json.loads(data)
except ValueError:
print 'Wrong data: %s' % data
def _getVMStatus(self):
response = self._send({'execute': 'query-status'})
if response:
return response['return']['status']
else:
raise IOError('Empty answer')
def _waitForVMStatus(self, wanted_status):
while True:
try:
actual_status = self._getVMStatus()
if actual_status == wanted_status:
return
else:
print 'VM in %s status, wanting it to be %s, retrying...' % (
actual_status, wanted_status)
time.sleep(1)
except IOError:
print 'VM not ready, retrying...'
def capabilities(self):
print 'Asking for capabilities...'
self._send({'execute': 'qmp_capabilities'})
def suspend(self):
print 'Suspending VM...'
self._send({'execute': 'stop'})
self._waitForVMStatus('paused')
def resume(self):
print 'Resuming VM...'
self._send({'execute': 'cont'})
self._waitForVMStatus('running')
def _queryBlockJobs(self, device):
return self._send({'execute': 'query-block-jobs'})
def _getRunningJobList(self, device):
result = self._queryBlockJobs(device)
if result.get('return'):
return result['return']
else:
return
def driveBackup(self, backup_target, source_device='virtio0', sync_type='full'):
print 'Asking Qemu to perform backup to %s' % backup_target
# XXX: check for error
self._send({
'execute': 'drive-backup',
'arguments': {
'device': source_device,
'sync': sync_type,
'target': backup_target,
}
})
while self._getRunningJobList(backup_target):
print 'Job is not finished yet.'
time.sleep(20)
def createSnapshot(self, snapshot_file, device='virtio0'):
print self._send({
'execute': 'blockdev-snapshot-sync',
'arguments': {
'device': device,
'snapshot-file': snapshot_file,
}
})
def createInternalSnapshot(self, name=None, device='virtio0'):
if name is None:
name = str(int(time.time()))
self._send({
'execute': 'blockdev-snapshot-internal-sync',
'arguments': {
'device': device,
'name': name,
}
})
def deleteInternalSnapshot(self, name, device='virtio0'):
self._send({
'execute': 'blockdev-snapshot-delete-internal-sync',
'arguments': {
'device': device,
'name': name,
}
})
def queryCommands(self):
pprint.pprint(self._send({'execute': 'query-commands'})['return'])
def main():
unix_socket_location, action, remainding_argument_list = parseArgument()
qemu_wrapper = QemuQMPWrapper(unix_socket_location)
if remainding_argument_list:
getattr(qemu_wrapper, action)(*remainding_argument_list)
else:
getattr(qemu_wrapper, action)()
if __name__ == '__main__':
main()
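A hedged usage sketch of the wrapper above; the socket path is a placeholder and must point at a qemu process started with -qmp unix:<path>,server. The same actions are reachable through the qemu-qmp-client console script registered in setup.py, e.g. qemu-qmp-client --socket /srv/qemu/qmp.sock --drive-backup /srv/backup/vm.qcow2:
# Hypothetical Python usage of QemuQMPWrapper.
wrapper = QemuQMPWrapper('/srv/qemu/qmp.sock')  # retries until qemu answers
wrapper.suspend()                               # sends 'stop', waits for 'paused'
wrapper.driveBackup('/srv/backup/vm.qcow2')     # polls until the block job ends
wrapper.resume()                                # sends 'cont', waits for 'running'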
......@@ -15,3 +15,32 @@ This module contains:
* A Resiliency Test Suite framework (in suites/), used to easily write new
test suites
* A list of test suites
TODO:
* Check that each partition is on a different slapos node.
* Test for bang calls
* Be able to configure from ERP5 Master (i.e. from instance parameters): count of PBS/clones, then test several possibilities (the so-called "count" in the test suite)
* Use Nexedi ERP5, when in production.
* Put the KVM disk image in a safe place.
------------
For reference: How-to deploy the whole test system
1/ Deploy a SlapOS Master
2/ Deploy an ERP5, install erp5_test_result BT with scalability feature (currently in the scalability-master2 branch of erp5.git) (currently, a few lines had to be changed in the scalability extension of the portal_class; this should be committed)
3/ Configure 3 nodes in the new SlapOS Master, deploy in each a testnode with scalability feature (erp5testnode-scalability branch of slapos.git) with parameters like:
<?xml version="1.0" encoding="utf-8"?>
<instance>
<parameter id="test-node-title">COMP-0-Testnode</parameter>
<parameter id="test-suite-master-url">https://zope:insecure@softinst43496.host.vifib.net/erp5/portal_task_distribution/1</parameter>
</instance>
3bis/ Supply and request http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.92:/software/kvm/software.cfg on a public node (so that vnc frontends are ok). "domain" parameter should be [ipv6] of partition. ipv4:4443 should be 6tunnelled to ipv6:4443 (Note: here, instead, I just hacked kvm_frontend to listen on ipv6).
3ter/ Supply and request http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg, with any "domain" (it won't be used), on a public node (so that web frontends are ok)
4/ On the ERP5 instance, create a project, a Task Distribution (in portal_task_distribution, type Scalability Task Distribution)
5/ On the ERP5 instance, create a Test Suite, validate it
Note: the slapos nodes are currently deployed using slapos-in-partition.
Note: you have to manually kill -10 the erp5testnode process to start deployment of test because it doesn't know when SR installation is finished.
Note: you have to manually run slapos-node-software --all on the slapos nodes if you are developing the SR you are testing.
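For steps 3bis/3ter, the supply and request can also be driven from a slapos console against the new master. A rough sketch, with placeholder master URL, partition reference and parameters:
import slapos.slap
slap = slapos.slap.slap()
slap.initializeConnection('https://slapos.master.example.com/')
kvm_sr = 'http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.92:/software/kvm/software.cfg'
# "domain" should be the [ipv6] of the partition, as noted in 3bis.
slap.registerOpenOrder().request(
    kvm_sr,
    partition_reference='kvm-frontend',
    partition_parameter_kw={'domain': '[2001:db8::1]'},
)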
......@@ -37,10 +37,6 @@ import traceback
from erp5.util import taskdistribution
from erp5.util.testnode import Utils
MAX_INSTALLATION_TIME = 60 * 50
MAX_TESTING_TIME = 60
MAX_GETTING_CONNECTION_TIME = 60 * 5
def importFrom(name):
"""
Import a test suite module (in the suites module) and return it.
......@@ -147,7 +143,7 @@ class ScalabilityLauncher(object):
Return a ScalabilityTest with information about the currently running test case,
or None if no test case is ready
"""
data = self.test_result.getNextTestCase()
data = self.test_result.getRunningTestCase()
if data == None:
return None
decoded_data = Utils.deunicodeData(json.loads(
......
......@@ -42,6 +42,10 @@ import urllib
logger = logging.getLogger('KVMResiliencyTest')
# Wait for 2 hours before renaming, so that replication of data is done
# (~1GB of data to backup)
SLEEP_TIME = 2 * 60 * 60
def fetchMainInstanceIP(current_partition, software_release, instance_name):
return current_partition.request(
software_release=software_release,
......@@ -105,7 +109,9 @@ def runTestSuite(server_url, key_file, cert_file,
3/ Resilience is done, wait XX seconds
4/ For each clone: do a takeover. Check that the IPv6 of the new main instance is different. Check, when doing an HTTP request to the new VM that fetches the stored random number, that the sent number is the same.
Note: disk image is a simple debian with the following python code running at boot:
Note: disk image is a simple debian with gunicorn and flask installed:
apt-get install python-setuptools; easy_install gunicorn flask
With the following python code running at boot in /root/number.py:
import os
......@@ -115,7 +121,7 @@ def runTestSuite(server_url, key_file, cert_file,
storage = 'storage.txt'
@app.route("/")
def greeting_list(): # 'cause they are several greetings, and plural is forbidden.
def greeting_list(): # 'cause there are several greetings, and plural is forbidden.
return "Hello World"
@app.route("/get")
......@@ -124,13 +130,39 @@ def runTestSuite(server_url, key_file, cert_file,
@app.route("/set")
def set():
if os.path.exists(storage):
abort(503)
#if os.path.exists(storage):
# abort(503)
open(storage, 'w').write(request.args['key'])
return "OK"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
Then create the boot script:
echo "cd /root; /usr/local/bin/gunicorn number:app -b 0.0.0.0:80 -D --error-logfile /root/error_log --access-logfile /root/access_log" > /etc/init.d/gunicorn-number
chmod +x /etc/init.d/gunicorn-number
update-rc.d gunicorn-number defaults
There is also a script that randomly generates I/O, in /root/io.sh:
#!/bin/sh
# Randomly generates high I/O on disk. Goal is to write on disk so that
# it flushes at the same time that snapshot of disk image is done, to check if
# it doesn't corrupt image.
# Ayayo!
while [ 1 ]; do
dd if=/dev/urandom of=random count=2k
sync
sleep 0.2
done
Then create the boot script:
echo "/bin/sh /root/io.sh &" > /etc/init.d/io
chmod +x /etc/init.d/io
update-rc.d io defaults
"""
slap = slapos.slap.slap()
slap.initializeConnection(server_url, key_file, cert_file)
......@@ -142,9 +174,6 @@ def runTestSuite(server_url, key_file, cert_file,
ip = fetchMainInstanceIP(partition, software, kvm_rootinstance_name)
logger.info('KVM IP is %s.' % ip)
key = setRandomKey(ip)
logger.info('Key set for test in current KVM: %s.' % key)
# In resilient stack, main instance (example with KVM) is named "kvm0",
# clones are named "kvm1", "kvm2", ...
clone_count = int(total_instance_count) - 1
......@@ -154,23 +183,34 @@ def runTestSuite(server_url, key_file, cert_file,
# Test each clone
while current_clone <= clone_count:
logger.info('Testing kvm%s.' % current_clone)
# Wait for XX minutes so that replication is done
sleep_time = 60 * 15#2 * 60 * 60
logger.info('Sleeping for %s seconds.' % sleep_time)
time.sleep(sleep_time)
key = setRandomKey(ip)
logger.info('Key set for test in current KVM: %s.' % key)
logger.info('Sleeping for %s seconds.' % SLEEP_TIME)
time.sleep(SLEEP_TIME)
# Make the clone instance takeover the main instance
logger.info('Replacing main instance by clone instance...')
takeover(
server_url=server_url,
key_file=key_file,
cert_file=cert_file,
computer_guid=computer_id,
partition_id=partition_id,
software_release=software,
namebase=namebase,
winner_instance_suffix=str(current_clone),
)
for i in range(0, 10):
try:
takeover(
server_url=server_url,
key_file=key_file,
cert_file=cert_file,
computer_guid=computer_id,
partition_id=partition_id,
software_release=software,
namebase=namebase,
winner_instance_suffix=str(current_clone),
)
break
except: # SSLError
traceback.print_exc()
if i == 9:
raise
logger.warning('takeover failed. Retrying...')
time.sleep(10)
logger.info('Done.')
# Wait for the new IP (of old-clone new-main instance) to appear.
......
......@@ -42,6 +42,7 @@ class ResiliencyTestSuite(object):
computer_id, partition_id, software,
namebase,
root_instance_name,
sleep_time_between_test=600,
total_instance_count="3"):
self.server_url = server_url
self.key_file = key_file
......@@ -52,6 +53,7 @@ class ResiliencyTestSuite(object):
self.namebase = namebase
self.total_instance_count = total_instance_count
self.root_instance_name = root_instance_name
self.sleep_time_between_test = sleep_time_between_test
slap = slapos.slap.slap()
slap.initializeConnection(server_url, key_file, cert_file)
......@@ -151,12 +153,12 @@ class ResiliencyTestSuite(object):
# Test each clone
while current_clone <= clone_count:
# Wait for XX minutes so that replication is done
sleep_time = 60 * 15#2 * 60 * 60
self.logger.info('Sleeping for %s seconds before testing clone %s.' % (
sleep_time,
self.sleep_time_between_test,
current_clone
))
time.sleep(sleep_time)
time.sleep(self.sleep_time_between_test)
self._doTakeover(current_clone)
self.logger.info('Testing %s%s instance.' % (self.namebase, current_clone))
success = self.checkDataOnCloneInstance()
......
......@@ -50,22 +50,19 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
# Setup urllib2 with cookie support
cookie_jar = cookielib.CookieJar()
self._opener_director = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
self._opener_director = urllib2.build_opener(
urllib2.HTTPCookieProcessor(cookie_jar)
)
ResiliencyTestSuite.__init__(
self,
server_url, key_file, cert_file,
computer_id, partition_id, software,
namebase,
slaprunner_rootinstance_name
slaprunner_rootinstance_name,
300
)
def generateData(self):
self.slaprunner_password = ''.join(random.SystemRandom().sample(string.ascii_lowercase, 8))
self.slaprunner_user = 'slapos'
self.logger.info('Generated slaprunner user is: %s' % self.slaprunner_user)
self.logger.info('Generated slaprunner password is: %s' % self.slaprunner_password)
def _connectToSlaprunner(self, resource, data=None):
"""
Utility.
......@@ -84,23 +81,92 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
def _login(self):
self.logger.debug('Logging in...')
self._connectToSlaprunner('doLogin', data='clogin=%s&cpwd=%s' % (self.slaprunner_user, self.slaprunner_password))
self._connectToSlaprunner('doLogin', data='clogin=%s&cpwd=%s' % (
self.slaprunner_user,
self.slaprunner_password)
)
def _retrieveInstanceLogFile(self):
"""
Store the logfile (=data) of the instance, check it is not empty nor it is html.
Store the logfile (=data) of the instance; check that it is neither empty
nor html.
"""
data = self._connectToSlaprunner(resource='fileBrowser', data='opt=9&filename=log.log&dir=instance_root%252Fslappart0%252Fvar%252Flog%252F')
data = self._connectToSlaprunner(
resource='fileBrowser',
data='opt=9&filename=log.log&dir=instance_root%252Fslappart0%252Fvar%252Flog%252F'
)
self.logger.info('Retrieved data are:\n%s' % data)
if data.find('<') is not -1:
raise IOError('Could not retrieve logfile content: retrieved content is html.')
raise IOError(
'Could not retrieve logfile content: retrieved content is html.'
)
if data.find('Could not load') is not -1:
raise IOError('Could not retrieve logfile content: server could not load the file.')
raise IOError(
'Could not retrieve logfile content: server could not load the file.'
)
if data.find('Hello') is -1:
raise IOError('Could not retrieve logfile content: retrieve content does not match "Hello".')
raise IOError(
'Could not retrieve logfile content: retrieved content does not match "Hello".'
)
return data
def _waitForSoftwareBuild(self):
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"software": true') is not -1:
self.logger.info('Software release is still building. Sleeping...')
time.sleep(15)
self.logger.info('Software Release has been built / is no longer building.')
def _buildSoftwareRelease(self):
self.logger.info('Building the Software Release...')
try:
self._connectToSlaprunner(resource='runSoftwareProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before software release is finished.
pass
self._waitForSoftwareBuild()
def _deployInstance(self):
self.logger.info('Deploying instance...')
try:
self._connectToSlaprunner(resource='runInstanceProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before the software release is finished.
pass
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"instance": true') is not -1:
self.logger.info('Buildout is still running. Sleeping...')
time.sleep(15)
self.logger.info('Instance has been deployed.')
def _gitClone(self):
self.logger.debug('Doing git clone of git.erp5.org/repos/slapos.git...')
try:
self._connectToSlaprunner(
resource='cloneRepository',
data='repo=http://git.erp5.org/repos/slapos.git&name=workspace/slapos&email=slapos@slapos.org&user=slapos'
)
except (NotHttpOkException, urllib2.HTTPError):
# cloning can be very long.
# XXX: quite dirty way to check.
while self._connectToSlaprunner('getProjectStatus', data='project=workspace/slapos').find('On branch master') is -1:
self.logger.info('git-cloning ongoing, sleeping...')
def _openSoftwareRelease(self, software_name):
self.logger.debug('Opening %s software release...' % software_name)
self._connectToSlaprunner(
resource='setCurrentProject',
data='path=workspace/slapos/software/%s/' % software_name
)
def generateData(self):
self.slaprunner_password = ''.join(
random.SystemRandom().sample(string.ascii_lowercase, 8)
)
self.slaprunner_user = 'slapos'
self.logger.info('Generated slaprunner user is: %s' % self.slaprunner_user)
self.logger.info('Generated slaprunner password is: %s' % self.slaprunner_password)
def pushDataOnMainInstance(self):
"""
Create a dummy Software Release,
......@@ -117,47 +183,26 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
slaprunner_recovery_code = parameter_dict['password_recovery_code']
self.logger.debug('Creating the slaprunner account...')
self._connectToSlaprunner(resource='configAccount', data='name=slapos&username=%s&email=slapos@slapos.org&password=%s&rcode=%s' % (self.slaprunner_user, self.slaprunner_password, slaprunner_recovery_code))
self._connectToSlaprunner(
resource='configAccount',
data='name=slapos&username=%s&email=slapos@slapos.org&password=%s&rcode=%s' % (
self.slaprunner_user,
self.slaprunner_password,
slaprunner_recovery_code
)
)
self._login()
self.logger.debug('Opening hello-world software release from git...')
try:
self._connectToSlaprunner(resource='cloneRepository', data='repo=http://git.erp5.org/repos/slapos.git&name=workspace/slapos&email=slapos@slapos.org&user=slapos')
except (NotHttpOkException, urllib2.HTTPError):
# cloning can be very long.
# XXX: quite dirty way to check.
while self._connectToSlaprunner('getProjectStatus', data='project=workspace/slapos').find('On branch master') is -1:
self.logger.info('git-cloning ongoing, sleeping...')
self._gitClone()
# XXX should be taken from parameter.
self._connectToSlaprunner(resource='setCurrentProject', data='path=workspace/slapos/software/helloworld/')
self._openSoftwareRelease('helloworld')
self.logger.info('Building the Software Release...')
try:
self._connectToSlaprunner(resource='runSoftwareProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before software release is finished.
pass
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"software": true') is not -1:
self.logger.info('Buildout is still running. Sleeping...')
time.sleep(15)
self.logger.info('Software Release has been built.')
self.logger.info('Deploying instance...')
try:
self._connectToSlaprunner(resource='runInstanceProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before someftware release is finished.
pass
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"instance": true') is not -1:
self.logger.info('Buildout is still running. Sleeping...')
time.sleep(15)
self.logger.info('Instance has been deployed.')
self._buildSoftwareRelease()
self._deployInstance()
self.data = self._retrieveInstanceLogFile()
def checkDataOnCloneInstance(self):
"""
Check that:
......@@ -173,6 +218,10 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
old_parameter_value=old_slaprunner_backend_url
)
self._login()
self._waitForSoftwareBuild()
# XXX: in theory, it should be done automatically by slaprunner.
# In practice, it is still too dangerous for ERP5 instances.
self._deployInstance()
new_data = self._retrieveInstanceLogFile()
if new_data == self.data:
......
......@@ -21,8 +21,7 @@ class Parser(OptionParser):
"""
Initialize all possible options.
"""
OptionParser.__init__(self, usage=usage, version=version,
option_list=[
option_list = [
Option("-l", "--log_file",
help="The path to the log file used by the script.",
type=str),
......@@ -38,7 +37,10 @@ class Parser(OptionParser):
default=False,
action="store_true",
help="Debug mode."),
])
]
OptionParser.__init__(self, usage=usage, version=version,
option_list=option_list)
def check_args(self):
"""
......@@ -50,6 +52,7 @@ class Parser(OptionParser):
return options, args[0]
class Config:
def __init__(self):
self.configuration_file_path = None
......@@ -125,6 +128,7 @@ def run():
sys.exit(return_code)
def serve(config):
from views import app
from werkzeug.contrib.fixers import ProxyFix
......@@ -134,7 +138,7 @@ def serve(config):
app.config.update(
software_log=config.software_root.rstrip('/') + '.log',
instance_log=config.instance_root.rstrip('/') + '.log',
workspace = workdir,
workspace=workdir,
software_link=software_link,
instance_profile='instance.cfg',
software_profile='software.cfg',
......
......@@ -11,4 +11,3 @@ def as_json(f):
def inner(*args, **kwargs):
return Response(json.dumps(f(*args, **kwargs)), mimetype='application/json')
return inner
......@@ -15,7 +15,7 @@ from slapos.runner.utils import realpath, tail, isText
class FileBrowser(object):
"""This class contain all bases function for file browser"""
"""This class contains all base functions for file browser"""
def __init__(self, config):
self.config = config
......@@ -31,19 +31,19 @@ class FileBrowser(object):
html = 'var gsdirs = [], gsfiles = [];'
dir = urllib.unquote(dir)
# 'dir' is used below. XXX should not shadow a builtin name
# XXX-Marco 'dir' and 'all' should not shadow builtin names
realdir = realpath(self.config, dir)
if not realdir:
raise NameError('Could not load directory %s: Permission denied' % dir)
ldir = sorted(os.listdir(realdir), key=str.lower)
for f in ldir:
if f.startswith('.') and not all: #do not display this file/folder
if f.startswith('.') and not all: # do not display this file/folder
continue
ff = os.path.join(dir, f)
realfile = os.path.join(realdir, f)
mdate = datetime.datetime.fromtimestamp(os.path.getmtime(realfile)
).strftime("%Y-%d-%m %I:%M")
).strftime("%Y-%d-%m %I:%M")
md5sum = md5.md5(realfile).hexdigest()
if not os.path.isdir(realfile):
size = os.path.getsize(realfile)
......@@ -53,25 +53,20 @@ class FileBrowser(object):
ext = "unknow"
else:
ext = str.lower(ext)
html += 'gsfiles.push(new gsItem("1", "' + f + '", "' + \
ff + '", "' + str(size) + '", "' + md5sum + \
'", "' + ext + '", "' + mdate + '"));'
html += 'gsfiles.push(new gsItem("1", "%s", "%s", "%s", "%s", "%s", "%s"));' % (f, ff, size, md5sum, ext, mdate)
else:
html += 'gsdirs.push(new gsItem("2", "' + f + '", "' + \
ff + '", "0", "' + md5sum + '", "dir", "' + mdate + '"));'
html += 'gsdirs.push(new gsItem("2", "%s", "%s", "0", "%s", "dir", "%s"));' % (f, ff, md5sum, mdate)
return html
def makeDirectory(self, dir, filename):
"""Create a directory"""
realdir = self._realdir(dir)
folder = os.path.join(realdir, filename)
if not os.path.exists(folder):
os.mkdir(folder, 0744)
return '{result: \'1\'}'
return "{result: '1'}"
else:
return '{result: \'0\'}'
return "{result: '0'}"
def makeFile(self, dir, filename):
"""Create a file in a directory dir taken"""
......@@ -79,36 +74,39 @@ class FileBrowser(object):
fout = os.path.join(realdir, filename)
if not os.path.exists(fout):
open(fout, 'w')
return 'var responce = {result: \'1\'}'
return "var responce = {result: '1'}"
else:
return '{result: \'0\'}'
return "{result: '0'}"
def deleteItem(self, dir, files):
"""Delete a list of files or directories"""
# XXX-Marco do not shadow 'dir'
realdir = self._realdir(dir)
lfiles = urllib.unquote(files).split(',,,')
try:
# XXX-Marco do not shadow 'file'
for file in lfiles:
file = os.path.join(realdir, file)
if not os.path.exists(file):
continue #silent skip file....
continue # silent skip file....
details = file.split('/')
last = details[-1]
if last and last.startswith('.'):
continue #cannot delete this file/directory, to prevent security
continue # cannot delete this file/directory, to prevent security
if os.path.isdir(file):
shutil.rmtree(file)
else:
os.unlink(file)
except Exception as e:
return str(e)
return '{result: \'1\'}'
return "{result: '1'}"
def copyItem(self, dir, files, del_source=False):
"""Copy a list of files or directory to dir"""
realdir = self._realdir(dir)
lfiles = urllib.unquote(files).split(',,,')
try:
# XXX-Marco do not shadow 'file'
for file in lfiles:
realfile = realpath(self.config, file)
if not realfile:
......@@ -117,7 +115,7 @@ class FileBrowser(object):
details = realfile.split('/')
dest = os.path.join(realdir, details[-1])
if os.path.exists(dest):
raise NameError('NOT ALLOWED OPERATION : File or directory already exist')
raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
if os.path.isdir(realfile):
shutil.copytree(realfile, dest)
if del_source:
......@@ -128,7 +126,7 @@ class FileBrowser(object):
os.unlink(realfile)
except Exception as e:
return str(e)
return '{result: \'1\'}'
return "{result: '1'}"
def rename(self, dir, filename, newfilename):
"""Rename file or directory to dir/filename"""
......@@ -139,8 +137,8 @@ class FileBrowser(object):
tofile = os.path.join(realdir, newfilename)
if not os.path.exists(tofile):
os.rename(realfile, tofile)
return '{result: \'1\'}'
raise NameError('NOT ALLOWED OPERATION : File or directory already exist')
return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def copyAsFile(self, dir, filename, newfilename):
"""Copy file or directory to dir/filename"""
......@@ -148,21 +146,21 @@ class FileBrowser(object):
fromfile = os.path.join(realdir, filename)
tofile = os.path.join(realdir, newfilename)
if not os.path.exists(fromfile):
raise NameError('NOT ALLOWED OPERATION : File or directory not exist')
raise NameError('NOT ALLOWED OPERATION : File or directory does not exist')
if not os.path.exists(tofile):
shutil.copy(fromfile, tofile)
return '{result: \'1\'}'
raise NameError('NOT ALLOWED OPERATION : File or directory already exist')
return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def uploadFile(self, dir, files):
"""Upload a list of file in directory dir"""
"""Upload a list of files in directory dir"""
realdir = self._realdir(dir)
for file in files:
if files[file]:
filename = werkzeug.secure_filename(files[file].filename)
if not os.path.exists(os.path.join(dir, filename)):
files[file].save(os.path.join(realdir, filename))
return '{result: \'1\'}'
return "{result: '1'}"
def downloadFile(self, dir, filename):
"""Download file dir/filename"""
......@@ -179,7 +177,7 @@ class FileBrowser(object):
tozip = os.path.join(realdir, newfilename)
fromzip = os.path.join(realdir, filename)
if not os.path.exists(fromzip):
raise NameError('NOT ALLOWED OPERATION : File or directory not exist')
raise NameError('NOT ALLOWED OPERATION : File or directory does not exist')
if not os.path.exists(tozip):
zip = zipfile.ZipFile(tozip, 'w', zipfile.ZIP_DEFLATED)
if os.path.isdir(fromzip):
......@@ -191,8 +189,8 @@ class FileBrowser(object):
else:
zip.write(fromzip)
zip.close()
return '{result: \'1\'}'
raise NameError('NOT ALLOWED OPERATION : File or directory already exist')
return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def unzipFile(self, dir, filename, newfilename):
"""Extract a zipped archive"""
......@@ -200,7 +198,7 @@ class FileBrowser(object):
target = os.path.join(realdir, newfilename)
archive = os.path.join(realdir, filename)
if not os.path.exists(archive):
raise NameError('NOT ALLOWED OPERATION : File or directory not exist')
raise NameError('NOT ALLOWED OPERATION : File or directory does not exist')
if not os.path.exists(target):
zip = zipfile.ZipFile(archive)
#member = zip.namelist()
......@@ -209,8 +207,8 @@ class FileBrowser(object):
# zip.extractall(target)
#else:
# zip.extract(member[0], newfilename)
return '{result: \'1\'}'
raise NameError('NOT ALLOWED OPERATION : File or directory already exist')
return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def readFile(self, dir, filename, truncate=False):
"""Read file dir/filename and return content"""
......@@ -221,6 +219,6 @@ class FileBrowser(object):
if not isText(realfile):
return "FILE ERROR: Cannot display binary file, please open a text file only!"
if not truncate:
return open(realfile, 'r').read()
return open(realfile).read()
else:
return tail(open(realfile, 'r'), 0)
return tail(open(realfile), 0)
......@@ -12,12 +12,12 @@ from flask import jsonify
def cloneRepo(data):
"""Clonne a repository
"""Clone a repository
Args:
data: a dictionnary of parameters to use:
data: a dictionary of parameters to use:
data['path'] is the path of the new project
data['repo'] is the url of the repository to be cloned
data['email'] is the user email
data['email'] is the user's email
data['user'] is the name of the user
Returns:
a jsonify data"""
......@@ -29,7 +29,7 @@ def cloneRepo(data):
json = ""
try:
if os.path.exists(workDir) and len(os.listdir(workDir)) < 2:
shutil.rmtree(workDir) #delete useless files
shutil.rmtree(workDir) # delete useless files
repo = Repo.clone_from(data["repo"], workDir)
config_writer = repo.config_writer()
config_writer.add_section("user")
......@@ -42,10 +42,11 @@ def cloneRepo(data):
json = safeResult(str(e))
return jsonify(code=code, result=json)
def gitStatus(project):
"""Run git status and return status of specified project folder
Args:
project: path of the projet ti get status
project: path of the project to get status
Returns:
a parsed string that contains the result of git status"""
code = 0
......@@ -61,6 +62,7 @@ def gitStatus(project):
json = safeResult(str(e))
return jsonify(code=code, result=json, branch=branch, dirty=isdirty)
def switchBranch(project, name):
"""Switch a git branch
Args:
......@@ -76,13 +78,14 @@ def switchBranch(project, name):
if name == current_branch:
json = "This is already your active branch for this project"
else:
git = repo.git
git = repo.git
git.checkout(name)
code = 1
except Exception as e:
json = safeResult(str(e))
return jsonify(code=code, result=json)
def addBranch(project, name, onlyCheckout=False):
"""Add new git branch to the repository
Args:
......@@ -95,7 +98,7 @@ def addBranch(project, name, onlyCheckout=False):
json = ""
try:
repo = Repo(project)
git = repo.git
git = repo.git
if not onlyCheckout:
git.checkout('-b', name)
else:
......@@ -105,6 +108,7 @@ def addBranch(project, name, onlyCheckout=False):
json = safeResult(str(e))
return jsonify(code=code, result=json)
def getDiff(project):
"""Get git diff for the specified project directory"""
result = ""
......@@ -117,44 +121,40 @@ def getDiff(project):
result = safeResult(str(e))
return result
def gitCommit(project, msg):
"""Commit changes for the specified repository
Args:
project: directory of the local repository
msg: commit message"""
code = 1
json = ""
repo = Repo(project)
if repo.is_dirty:
git = repo.git
#add file to be commited
files = repo.untracked_files
for f in files:
git.add(f)
#Commit all modified and untracked files
git.commit('-a', '-m', msg)
else:
json = "Nothing to be commited"
return jsonify(code=code, result=json)
def gitPush(project):
"""Push changes for the specified repository
def gitPush(project, msg):
"""Commit and Push changes for the specified repository
Args:
project: directory of the local repository
msg: commit message"""
code = 0
json = ""
repo = Repo(project)
undo_commit = False
try:
git = repo.git
#push changes to repo
current_branch = repo.active_branch.name
git.push('origin', current_branch)
code = 1
repo = Repo(project)
if repo.is_dirty:
git = repo.git
current_branch = repo.active_branch.name
# add files to be committed
files = repo.untracked_files
for f in files:
git.add(f)
#Commit all modified and untracked files
git.commit('-a', '-m', msg)
undo_commit = True
#push changes to repo
git.push('origin', current_branch)
code = 1
else:
json = "Nothing to commit"
code = 1
except Exception as e:
if undo_commit:
git.reset("HEAD~") # undo previous commit
json = safeResult(str(e))
return jsonify(code=code, result=json)
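# Note on the signature change above: gitPush now takes (project, msg) and
# commits before pushing, so callers must supply a commit message; if the
# push fails, the fresh commit is undone with git.reset("HEAD~").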
def gitPull(project):
result = ""
code = 0
......@@ -167,6 +167,7 @@ def gitPull(project):
result = safeResult(str(e))
return jsonify(code=code, result=result)
def safeResult(result):
"""Parse string and remove credential of the user"""
regex = re.compile("(https:\/\/)([\w\d\._-]+:[\w\d\._-]+)\@([\S]+\s)", re.VERBOSE)
......
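The credential-scrubbing regex in safeResult captures the user:password pair of an https URL in its second group (the substitution itself lies outside this hunk). A small hedged illustration with made-up values:
import re
regex = re.compile("(https:\/\/)([\w\d\._-]+:[\w\d\._-]+)\@([\S]+\s)", re.VERBOSE)
m = regex.search('fatal: https://slapos:secret@git.erp5.org/repos/slapos.git not found')
print m.group(2)  # 'slapos:secret' -- the credentials safeResult strips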
......@@ -9,7 +9,7 @@ SLAPRUNNER_PROCESS_LIST = []
class Popen(subprocess.Popen):
"""
Extension of Popen to launch and kill process in a clean way
Extension of Popen to launch and kill processes in a clean way
"""
def __init__(self, *args, **kwargs):
"""
......@@ -28,7 +28,7 @@ class Popen(subprocess.Popen):
def kill(self, sig=signal.SIGTERM, recursive=False):
"""
Kill process and all its descendant if recursive
Kill process and all its descendants if recursive
"""
if self.poll() is None:
if recursive:
......@@ -83,7 +83,7 @@ def isRunning(name):
def killRunningProcess(name, recursive=False):
"""
Kill all process with name
Kill all processes with a given name
"""
for process in SLAPRUNNER_PROCESS_LIST:
if process.name == name:
......@@ -92,7 +92,7 @@ def killRunningProcess(name, recursive=False):
def handler(sig, frame):
"""
Signal handler to kill all process
Signal handler to kill all processes
"""
pid = os.getpid()
os.kill(-pid, sig)
......
......@@ -37,8 +37,8 @@ function deleteCookie(name, path, domain) {
function setCookie(name, value, expires, path, domain, secure) {
"use strict";
if (getCookie(name) !== null){
deleteCookie(name);
if (getCookie(name) !== null) {
deleteCookie(name);
}
if (!expires) {
var today = new Date();
......
......@@ -32,8 +32,14 @@ $(document).ready(function () {
$("#error").Popup(data.result, {type: 'alert', duration: 3000});
}
})
.error(function () {
$("#error").Popup("Cannot send your account identifier please try again!!",
.error(function (response) {
if (response && response.status === 401) {
$("#error").Popup('Login and/or password is incorrect.',
{type: 'alert', duration: 3000}
);
return
}
$("#error").Popup("Cannot send your account identifier",
{type: 'alert', duration: 3000});
})
.complete(function () {
......
......@@ -44,24 +44,24 @@ function clearAll(setStop) {
running = setStop;
}
function removeFirstLog(){
"use strict";
currentLogSize -= parseInt($("#salpgridLog p:first-child").attr('rel'), 10);
$("#salpgridLog p:first-child").remove();
function removeFirstLog() {
"use strict";
currentLogSize -= parseInt($("#salpgridLog p:first-child").attr('rel'), 10);
$("#salpgridLog p:first-child").remove();
}
function getRunningState() {
"use strict";
var size = 0;
var log_info = "";
var param = {
position: logReadingPosition,
log: (processState !== "Checking" && openedlogpage === processType.toLowerCase()) ? openedlogpage : ""
},
var size = 0,
log_info = "",
param = {
position: logReadingPosition,
log: (processState !== "Checking" && openedlogpage === processType.toLowerCase()) ? openedlogpage : ""
},
jqxhr = $.post(url, param, function (data) {
setRunningState(data);
size = data.content.position - logReadingPosition;
if (logReadingPosition !== 0 && data.content.truncated){
if (logReadingPosition !== 0 && data.content.truncated) {
log_info = "<p class='info' rel='0'>SLAPRUNNER INFO: SLAPGRID-LOG HAS BEEN TRUNCATED HERE. To see full log reload your log page</p>";
}
logReadingPosition = data.content.position;
......@@ -78,18 +78,16 @@ function getRunningState() {
}
processState = running ? "Running" : "Stopped";
currentLogSize += parseInt(size, 10);
if (currentLogSize > maxLogSize){
if (currentLogSize > maxLogSize) {
//Remove the first element into log div
removeFirstLog();
if (currentLogSize > maxLogSize){
if (currentLogSize > maxLogSize) {
removeFirstLog(); // in case the previous <p/> size is 0
}
}
})
.error(function () {
}).error(function () {
clearAll(false);
})
.complete(function () {
}).complete(function () {
if (running) {
setTimeout(function () {
getRunningState();
......
......@@ -19,11 +19,10 @@ $(document).ready(function () {
send = false,
edit = false,
selection = "",
edit_status = "";
var base_path = function() {
return softwareDisplay ? projectDir : currentProject;
}
edit_status = "",
base_path = function () {
return softwareDisplay ? projectDir : currentProject;
};
function setEditMode(file) {
var i,
......
......@@ -6,5 +6,5 @@
$(document).ready(function () {
"use strict";
$('#fileNavigator').gsFileManager({script: $SCRIPT_ROOT + "/fileBrowser", root:'workspace/'});
$('#fileNavigator').gsFileManager({script: $SCRIPT_ROOT + "/fileBrowser", root: 'workspace/'});
});