Commit cfbed313 authored by Nicolas Wavrant's avatar Nicolas Wavrant

Merge remote-tracking branch 'origin/master'

Conflicts:
	slapos/runner/gittools.py
parents 7c98e0a2 022fcb2d
0.37.4 (2013-10-15)
-------------------
* Improve QEMU QMP wrapper by adding drive-backup method and other helpers. [0afb7d6, 95d0c8b]
0.37.3 (2013-10-10)
-------------------
* pubsub: don't swallow output of subprocess to allow debug. [c503484]
0.37.2 (2013-10-10)
-------------------
* Add QEMU QMP wrapper. [9e819a8]
* KVM resiliency test: update docstring about how to setup disk image. [dbe347f]
* KVM resiliency test: change key for each clone. [7ef1db3]
0.37.1 (2013-10-03)
-------------------
* pubsub notifier: handle timeout and other connection errors. [ac4c75c]
* equeue: cast str(timestamp) to please gdbm. [8b067d6]
0.37 (2013-09-30)
=================
* equeue: log output of subprocess. [1694937]
* slaprunner: don't send 200 when login is bad. [4a8e10bf]
* Improve reliability of resiliency tests.
0.36 (2013-09-05) 0.36 (2013-09-05)
================= =================
......
...@@ -2,7 +2,7 @@ from setuptools import setup, find_packages ...@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
import glob import glob
import os import os
version = '0.36.1-dev' version = '0.37.4'
name = 'slapos.toolbox' name = 'slapos.toolbox'
long_description = open("README.txt").read() + "\n" + \ long_description = open("README.txt").read() + "\n" + \
open("CHANGES.txt").read() + "\n" open("CHANGES.txt").read() + "\n"
...@@ -70,6 +70,7 @@ setup(name=name, ...@@ -70,6 +70,7 @@ setup(name=name,
'onetimeupload = slapos.onetimeupload:main', 'onetimeupload = slapos.onetimeupload:main',
'pubsubnotifier = slapos.pubsub.notifier:main', 'pubsubnotifier = slapos.pubsub.notifier:main',
'pubsubserver = slapos.pubsub:main', 'pubsubserver = slapos.pubsub:main',
'qemu-qmp-client = slapos.qemuqmpclient:main',
'shacache = slapos.shacache:main', 'shacache = slapos.shacache:main',
'slapbuilder = slapos.builder:main', 'slapbuilder = slapos.builder:main',
'slapcontainer = slapos.container:main', 'slapcontainer = slapos.container:main',
......
...@@ -72,6 +72,26 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer): ...@@ -72,6 +72,26 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
def setDB(self, database): def setDB(self, database):
self.db = gdbm.open(database, 'cs', 0700) self.db = gdbm.open(database, 'cs', 0700)
def _runCommandIfNeeded(self, command, timestamp):
  """Execute `command` at most once per timestamp, under the server lock.

  The gdbm database maps each command path to the timestamp of its last
  run; the command is skipped when it has already run at this timestamp
  or a later one.  Output of the subprocess is forwarded to the logger
  so that failures can be debugged.
  """
  with self.lock:
    already_done = (command in self.db
                    and timestamp <= int(self.db[command]))
    if already_done:
      self.logger.info("%s already run.", command)
      return

    self.logger.info("Running %s, %s with output:", command, timestamp)
    try:
      output = subprocess.check_output([command], stderr=subprocess.STDOUT)
      self.logger.info(output)
      self.logger.info("%s finished successfully.", command)
    except subprocess.CalledProcessError as e:
      self.logger.warning("%s exited with status %s. output is: \n %s" % (
          command,
          e.returncode,
          e.output,
      ))
    # gdbm values must be strings, hence the cast.
    self.db[command] = str(timestamp)
def process_request_thread(self, request, client_address): def process_request_thread(self, request, client_address):
# Handle request # Handle request
self.logger.debug("Connection with file descriptor %d", request.fileno()) self.logger.debug("Connection with file descriptor %d", request.fileno())
...@@ -102,23 +122,7 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer): ...@@ -102,23 +122,7 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
except: except:
self.logger.warning("Couldn't respond to %r", request.fileno()) self.logger.warning("Couldn't respond to %r", request.fileno())
self.close_request(request) self.close_request(request)
# Run command if needed self._runCommandIfNeeded(command, timestamp)
with self.lock:
if command not in self.db or timestamp > int(self.db[command]):
self.logger.info("Running %s, %s", command, timestamp)
# XXX stdout and stderr redirected to null as they are not read
with open(os.devnull, 'r+') as f:
status = subprocess.call([command], close_fds=True,
stdin=f, stdout=f, stderr=f)
if status:
self.logger.warning("%s finished with non zero status.",
command)
else:
self.logger.info("%s finished successfully.", command)
self.db[command] = timestamp
else:
self.logger.info("%s already runned.", command)
# Well the following function is made for schrodinger's files, # Well the following function is made for schrodinger's files,
# It will work if the file exists or not # It will work if the file exists or not
def remove_existing_file(path): def remove_existing_file(path):
......
...@@ -4,11 +4,11 @@ ...@@ -4,11 +4,11 @@
import argparse import argparse
import csv import csv
import httplib import httplib
import os
import socket import socket
import subprocess import subprocess
import sys import sys
import time import time
import traceback
import urllib2 import urllib2
import urlparse import urlparse
import uuid import uuid
...@@ -31,28 +31,23 @@ def main(): ...@@ -31,28 +31,23 @@ def main():
args = parser.parse_args() args = parser.parse_args()
with open(os.devnull) as devnull: try:
command = subprocess.Popen(args.executable[0], content = subprocess.check_output(
stdin=subprocess.PIPE, args.executable[0],
stdout=devnull, stderr=subprocess.STDOUT
stderr=subprocess.PIPE, )
close_fds=True) exit_code = 0
command.stdin.flush() except subprocess.CalledProcessError as e:
command.stdin.close() content = e.output
exit_code = e.returncode
command_failed = (command.wait() != 0)
command_stderr = command.stderr.read() print content
if command_failed: content += ("\n<p>Failed with returncode <em>%d</em>.</p>"
content = ("<p>Failed with returncode <em>%d</em>.</p>" "<p>Output is: </p><pre>%s</pre>" % (
"<p>Standard error output is :</p><pre>%s</pre>") % ( exit_code,
command.poll(), content.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
command_stderr.replace('&', '&amp;')\ ))
.replace('<', '&lt;')\
.replace('>', '&gt;'),
)
else:
content = "<p>Everything went well.</p>"
with open(args.logfile[0], 'a') as file_: with open(args.logfile[0], 'a') as file_:
cvsfile = csv.writer(file_) cvsfile = csv.writer(file_)
...@@ -63,9 +58,8 @@ def main(): ...@@ -63,9 +58,8 @@ def main():
'slapos:%s' % uuid.uuid4(), 'slapos:%s' % uuid.uuid4(),
]) ])
if command_failed: if exit_code != 0:
sys.stderr.write('%s\n' % command_stderr) sys.exit(exit_code)
sys.exit(1)
print 'Fetching %s feed...' % args.feed_url[0] print 'Fetching %s feed...' % args.feed_url[0]
...@@ -80,14 +74,18 @@ def main(): ...@@ -80,14 +74,18 @@ def main():
notification_port = socket.getservbyname(notification_url.scheme) notification_port = socket.getservbyname(notification_url.scheme)
headers = {'Content-Type': feed.info().getheader('Content-Type')} headers = {'Content-Type': feed.info().getheader('Content-Type')}
notification = httplib.HTTPConnection(notification_url.hostname, try:
notification_port) notification = httplib.HTTPConnection(notification_url.hostname,
notification.request('POST', notification_url.path, body, headers) notification_port)
response = notification.getresponse() notification.request('POST', notification_url.path, body, headers)
response = notification.getresponse()
if not (200 <= response.status < 300): if not (200 <= response.status < 300):
sys.stderr.write("The remote server at %s didn't send a successful reponse.\n" % notif_url) sys.stderr.write("The remote server at %s didn't send a successful reponse.\n" % notif_url)
sys.stderr.write("Its response was %r\n" % response.reason) sys.stderr.write("Its response was %r\n" % response.reason)
some_notification_failed = True
except socket.error as exc:
sys.stderr.write("Connection with remote server at %s failed:\n" % notif_url)
sys.stderr.write(traceback.format_exc(exc))
some_notification_failed = True some_notification_failed = True
if some_notification_failed: if some_notification_failed:
......
##############################################################################
#
# Copyright (c) 2013 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import argparse
import json
import os
import pprint
import socket
import time
def parseArgument():
  """
  Very basic argument parser. Might blow up for anything else than
  "./executable mysocket.sock stop/resume".
  """
  # Each mutually-redundant flag stores the name of the QemuQMPWrapper
  # method to invoke into the single `action` destination.
  action_options = (
      ('--suspend', 'suspend'),
      ('--resume', 'resume'),
      ('--create-snapshot', 'createSnapshot'),
      ('--create-internal-snapshot', 'createInternalSnapshot'),
      ('--delete-internal-snapshot', 'deleteInternalSnapshot'),
      ('--drive-backup', 'driveBackup'),
      ('--query-commands', 'queryCommands'),
  )
  parser = argparse.ArgumentParser()
  for option, method_name in action_options:
    parser.add_argument(option, action='store_const', dest='action',
                        const=method_name)
  parser.add_argument('--socket', dest='unix_socket_location', required=True)
  # Everything left over is passed verbatim to the chosen method.
  parser.add_argument('remainding_argument_list', nargs=argparse.REMAINDER)
  args = parser.parse_args()
  return args.unix_socket_location, args.action, args.remainding_argument_list
class QemuQMPWrapper(object):
"""
Small wrapper around Qemu's QMP to control a qemu VM.
See http://git.qemu.org/?p=qemu.git;a=blob;f=qmp-commands.hx for
QMP API definition.
"""
def __init__(self, unix_socket_location):
self.socket = self.connectToQemu(unix_socket_location)
self.capabilities()
@staticmethod
def connectToQemu(unix_socket_location):
"""
Create a socket, connect to qemu, be sure it answers, return socket.
"""
if not os.path.exists(unix_socket_location):
raise Exception('unix socket %s does not exist.' % unix_socket_location)
print 'Connecting to qemu...'
so = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
connected = False
while not connected:
try:
so.connect(unix_socket_location)
except socket.error:
time.sleep(1)
print 'Could not connect, retrying...'
else:
connected = True
so.recv(1024)
return so
def _send(self, message):
self.socket.send(json.dumps(message))
data = self.socket.recv(65535)
try:
return json.loads(data)
except ValueError:
print 'Wrong data: %s' % data
def _getVMStatus(self):
response = self._send({'execute': 'query-status'})
if response:
return self._send({'execute': 'query-status'})['return']['status']
else:
raise IOError('Empty answer')
def _waitForVMStatus(self, wanted_status):
while True:
try:
actual_status = self._getVMStatus()
if actual_status == wanted_status:
return
else:
print 'VM in %s status, wanting it to be %s, retrying...' % (
actual_status, wanted_status)
time.sleep(1)
except IOError:
print 'VM not ready, retrying...'
def capabilities(self):
print 'Asking for capabilities...'
self._send({'execute': 'qmp_capabilities'})
def suspend(self):
print 'Suspending VM...'
self._send({'execute': 'stop'})
self._waitForVMStatus('paused')
def resume(self):
print 'Resuming VM...'
self._send({'execute': 'cont'})
self._waitForVMStatus('running')
def _queryBlockJobs(self, device):
return self._send({'execute': 'query-block-jobs'})
def _getRunningJobList(self, device):
result = self._queryBlockJobs(device)
if result.get('return'):
return result['return']
else:
return
def driveBackup(self, backup_target, source_device='virtio0', sync_type='full'):
print 'Asking Qemu to perform backup to %s' % backup_target
# XXX: check for error
self._send({
'execute': 'drive-backup',
'arguments': {
'device': source_device,
'sync': sync_type,
'target': backup_target,
}
})
while self._getRunningJobList(backup_target):
print 'Job is not finished yet.'
time.sleep(20)
def createSnapshot(self, snapshot_file, device='virtio0'):
print self._send({
'execute': 'blockdev-snapshot-sync',
'arguments': {
'device': device,
'snapshot-file': snapshot_file,
}
})
def createInternalSnapshot(self, name=None, device='virtio0'):
if name is None:
name = int(time.time())
self._send({
'execute': 'blockdev-snapshot-internal-sync',
'arguments': {
'device': device,
'name': name,
}
})
def deleteInternalSnapshot(self, name, device='virtio0'):
self._send({
'execute': 'blockdev-snapshot-delete-internal-sync',
'arguments': {
'device': device,
'name': name,
}
})
def queryCommands(self):
pprint.pprint(self._send({'execute': 'query-commands'})['return'])
def main():
  """Entry point: connect to qemu's QMP socket and run the chosen action,
  forwarding any remaining command-line arguments to it."""
  unix_socket_location, action, remainding_argument_list = parseArgument()
  qemu_wrapper = QemuQMPWrapper(unix_socket_location)
  # Calling with *[] is identical to calling with no arguments, so no
  # branch on the list being empty is needed.
  getattr(qemu_wrapper, action)(*remainding_argument_list)


if __name__ == '__main__':
  main()
...@@ -15,3 +15,32 @@ This module contains: ...@@ -15,3 +15,32 @@ This module contains:
* A Resiliency Test Suite framework (in suites/), used to easily write new * A Resiliency Test Suite framework (in suites/), used to easily write new
test suites test suites
* A list of test suites * A list of test suites
TODO :
* Check that each partition is in different slapos node.
* Test for bang calls
* Be able to configure from ERP5 Master (i.e from instance parameters): count of PBS/clones, then test several possibilities (so called "count" in test suite)
* Use Nexedi ERP5, when in production.
* Put the KVM disk image in a safe place.
------------
For reference: How-to deploy the whole test system
1/ Deploy a SlapOS Master
2/ Deploy an ERP5, install erp5_test_result BT with scalability feature (currently in scalability-master2 branch of erp5.git) (currently, had to change a few lines in the scalability extension of the portal_class, should be committed)
3/ Configure 3 nodes in the new SlapOS Master, deploy in each a testnode with scalability feature (erp5testnode-scalability branch of slapos.git) with parameters like:
<?xml version="1.0" encoding="utf-8"?>
<instance>
<parameter id="test-node-title">COMP-0-Testnode</parameter>
<parameter id="test-suite-master-url">https://zope:insecure@softinst43496.host.vifib.net/erp5/portal_task_distribution/1</parameter>
</instance>
3bis/ Supply and request http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.92:/software/kvm/software.cfg on a public node (so that vnc frontends are ok). "domain" parameter should be [ipv6] of partition. ipv4:4443 should be 6tunnelled to ipv6:4443 (Note: here, instead, I just hacked kvm_frontend to listen on ipv6).
3ter/ Supply and request http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg, with any "domain" (it won't be used), on a public node (so that web frontends are ok)
4/ On the ERP5 instance, create a project, a Task Distribution (in portal_task_distribution, type Scalability Task Distribution)
5/ On the ERP5 instance, create a Test Suite, validate it
Note: the slapos nodes are currently deployed using slapos-in-partition.
Note: you have to manually kill -10 the erp5testnode process to start deployment of test because it doesn't know when SR installation is finished.
Note: you have to manually run slapos-node-software --all on the slapos nodes if you are developing the SR you are testing.
...@@ -37,10 +37,6 @@ import traceback ...@@ -37,10 +37,6 @@ import traceback
from erp5.util import taskdistribution from erp5.util import taskdistribution
from erp5.util.testnode import Utils from erp5.util.testnode import Utils
MAX_INSTALLATION_TIME = 60 * 50
MAX_TESTING_TIME = 60
MAX_GETTING_CONNECTION_TIME = 60 * 5
def importFrom(name): def importFrom(name):
""" """
Import a test suite module (in the suites module) and return it. Import a test suite module (in the suites module) and return it.
...@@ -147,7 +143,7 @@ class ScalabilityLauncher(object): ...@@ -147,7 +143,7 @@ class ScalabilityLauncher(object):
Return a ScalabilityTest with current running test case informations, Return a ScalabilityTest with current running test case informations,
or None if no test_case ready or None if no test_case ready
""" """
data = self.test_result.getNextTestCase() data = self.test_result.getRunningTestCase()
if data == None: if data == None:
return None return None
decoded_data = Utils.deunicodeData(json.loads( decoded_data = Utils.deunicodeData(json.loads(
......
...@@ -42,6 +42,10 @@ import urllib ...@@ -42,6 +42,10 @@ import urllib
logger = logging.getLogger('KVMResiliencyTest') logger = logging.getLogger('KVMResiliencyTest')
# Wait for 2 hours before renaming, so that replication of data is done
# (~1GB of data to backup)
SLEEP_TIME = 2 * 60 * 60
def fetchMainInstanceIP(current_partition, software_release, instance_name): def fetchMainInstanceIP(current_partition, software_release, instance_name):
return current_partition.request( return current_partition.request(
software_release=software_release, software_release=software_release,
...@@ -105,7 +109,9 @@ def runTestSuite(server_url, key_file, cert_file, ...@@ -105,7 +109,9 @@ def runTestSuite(server_url, key_file, cert_file,
3/ Resilience is done, wait XX seconds 3/ Resilience is done, wait XX seconds
4/ For each clone: do a takeover. Check that IPv6 of new main instance is different. Check, when doing a http request to the new VM that will fetch the stored random number, that the sent number is the same. 4/ For each clone: do a takeover. Check that IPv6 of new main instance is different. Check, when doing a http request to the new VM that will fetch the stored random number, that the sent number is the same.
Note: disk image is a simple debian with the following python code running at boot: Note: disk image is a simple debian with gunicorn and flask installed:
apt-get install python-setuptools; easy_install gunicorn flask
With the following python code running at boot in /root/number.py:
import os import os
...@@ -115,7 +121,7 @@ def runTestSuite(server_url, key_file, cert_file, ...@@ -115,7 +121,7 @@ def runTestSuite(server_url, key_file, cert_file,
storage = 'storage.txt' storage = 'storage.txt'
@app.route("/") @app.route("/")
def greeting_list(): # 'cause they are several greetings, and plural is forbidden. def greeting_list(): # 'cause there are several greetings, and plural is forbidden.
return "Hello World" return "Hello World"
@app.route("/get") @app.route("/get")
...@@ -124,13 +130,39 @@ def runTestSuite(server_url, key_file, cert_file, ...@@ -124,13 +130,39 @@ def runTestSuite(server_url, key_file, cert_file,
@app.route("/set") @app.route("/set")
def set(): def set():
if os.path.exists(storage): #if os.path.exists(storage):
abort(503) # abort(503)
open(storage, 'w').write(request.args['key']) open(storage, 'w').write(request.args['key'])
return "OK" return "OK"
if __name__ == "__main__": if __name__ == "__main__":
app.run(host='0.0.0.0', port=80) app.run(host='0.0.0.0', port=80)
Then create the boot script:
echo "cd /root; /usr/local/bin/gunicorn number:app -b 0.0.0.0:80 -D --error-logfile /root/error_log --access-logfile /root/access_log" > /etc/init.d/gunicorn-number
chmod +x /etc/init.d/gunicorn-number
update-rc.d gunicorn-number defaults
There also is a script that randomly generates I/O in /root/io.sh:
#!/bin/sh
# Randomly generates high I/O on disk. Goal is to write on disk so that
# it flushes at the same time that snapshot of disk image is done, to check if
# it doesn't corrupt image.
# Ayayo!
while [ 1 ]; do
dd if=/dev/urandom of=random count=2k
sync
sleep 0.2
done
Then create the boot script:
echo "/bin/sh /root/io.sh &" > /etc/init.d/io
chmod +x /etc/init.d/io
update-rc.d io defaults
""" """
slap = slapos.slap.slap() slap = slapos.slap.slap()
slap.initializeConnection(server_url, key_file, cert_file) slap.initializeConnection(server_url, key_file, cert_file)
...@@ -142,9 +174,6 @@ def runTestSuite(server_url, key_file, cert_file, ...@@ -142,9 +174,6 @@ def runTestSuite(server_url, key_file, cert_file,
ip = fetchMainInstanceIP(partition, software, kvm_rootinstance_name) ip = fetchMainInstanceIP(partition, software, kvm_rootinstance_name)
logger.info('KVM IP is %s.' % ip) logger.info('KVM IP is %s.' % ip)
key = setRandomKey(ip)
logger.info('Key set for test in current KVM: %s.' % key)
# In resilient stack, main instance (example with KVM) is named "kvm0", # In resilient stack, main instance (example with KVM) is named "kvm0",
# clones are named "kvm1", "kvm2", ... # clones are named "kvm1", "kvm2", ...
clone_count = int(total_instance_count) - 1 clone_count = int(total_instance_count) - 1
...@@ -154,23 +183,34 @@ def runTestSuite(server_url, key_file, cert_file, ...@@ -154,23 +183,34 @@ def runTestSuite(server_url, key_file, cert_file,
# Test each clone # Test each clone
while current_clone <= clone_count: while current_clone <= clone_count:
logger.info('Testing kvm%s.' % current_clone) logger.info('Testing kvm%s.' % current_clone)
# Wait for XX minutes so that replication is done
sleep_time = 60 * 15#2 * 60 * 60 key = setRandomKey(ip)
logger.info('Sleeping for %s seconds.' % sleep_time) logger.info('Key set for test in current KVM: %s.' % key)
time.sleep(sleep_time)
logger.info('Sleeping for %s seconds.' % SLEEP_TIME)
time.sleep(SLEEP_TIME)
# Make the clone instance takeover the main instance # Make the clone instance takeover the main instance
logger.info('Replacing main instance by clone instance...') logger.info('Replacing main instance by clone instance...')
takeover( for i in range(0, 10):
server_url=server_url, try:
key_file=key_file, takeover(
cert_file=cert_file, server_url=server_url,
computer_guid=computer_id, key_file=key_file,
partition_id=partition_id, cert_file=cert_file,
software_release=software, computer_guid=computer_id,
namebase=namebase, partition_id=partition_id,
winner_instance_suffix=str(current_clone), software_release=software,
) namebase=namebase,
winner_instance_suffix=str(current_clone),
)
break
except: # SSLError
traceback.print_exc()
if i is 9:
raise
logger.warning('takeover failed. Retrying...')
time.sleep(10)
logger.info('Done.') logger.info('Done.')
# Wait for the new IP (of old-clone new-main instance) to appear. # Wait for the new IP (of old-clone new-main instance) to appear.
......
...@@ -42,6 +42,7 @@ class ResiliencyTestSuite(object): ...@@ -42,6 +42,7 @@ class ResiliencyTestSuite(object):
computer_id, partition_id, software, computer_id, partition_id, software,
namebase, namebase,
root_instance_name, root_instance_name,
sleep_time_between_test=600,
total_instance_count="3"): total_instance_count="3"):
self.server_url = server_url self.server_url = server_url
self.key_file = key_file self.key_file = key_file
...@@ -52,6 +53,7 @@ class ResiliencyTestSuite(object): ...@@ -52,6 +53,7 @@ class ResiliencyTestSuite(object):
self.namebase = namebase self.namebase = namebase
self.total_instance_count = total_instance_count self.total_instance_count = total_instance_count
self.root_instance_name = root_instance_name self.root_instance_name = root_instance_name
self.sleep_time_between_test = sleep_time_between_test
slap = slapos.slap.slap() slap = slapos.slap.slap()
slap.initializeConnection(server_url, key_file, cert_file) slap.initializeConnection(server_url, key_file, cert_file)
...@@ -151,12 +153,12 @@ class ResiliencyTestSuite(object): ...@@ -151,12 +153,12 @@ class ResiliencyTestSuite(object):
# Test each clone # Test each clone
while current_clone <= clone_count: while current_clone <= clone_count:
# Wait for XX minutes so that replication is done # Wait for XX minutes so that replication is done
sleep_time = 60 * 15#2 * 60 * 60
self.logger.info('Sleeping for %s seconds before testing clone %s.' % ( self.logger.info('Sleeping for %s seconds before testing clone %s.' % (
sleep_time, self.sleep_time_between_test,
current_clone current_clone
)) ))
time.sleep(sleep_time) time.sleep(self.sleep_time_between_test)
self._doTakeover(current_clone) self._doTakeover(current_clone)
self.logger.info('Testing %s%s instance.' % (self.namebase, current_clone)) self.logger.info('Testing %s%s instance.' % (self.namebase, current_clone))
success = self.checkDataOnCloneInstance() success = self.checkDataOnCloneInstance()
......
...@@ -50,22 +50,19 @@ class SlaprunnerTestSuite(ResiliencyTestSuite): ...@@ -50,22 +50,19 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
# Setup urllib2 with cookie support # Setup urllib2 with cookie support
cookie_jar = cookielib.CookieJar() cookie_jar = cookielib.CookieJar()
self._opener_director = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar)) self._opener_director = urllib2.build_opener(
urllib2.HTTPCookieProcessor(cookie_jar)
)
ResiliencyTestSuite.__init__( ResiliencyTestSuite.__init__(
self, self,
server_url, key_file, cert_file, server_url, key_file, cert_file,
computer_id, partition_id, software, computer_id, partition_id, software,
namebase, namebase,
slaprunner_rootinstance_name slaprunner_rootinstance_name,
300
) )
def generateData(self):
self.slaprunner_password = ''.join(random.SystemRandom().sample(string.ascii_lowercase, 8))
self.slaprunner_user = 'slapos'
self.logger.info('Generated slaprunner user is: %s' % self.slaprunner_user)
self.logger.info('Generated slaprunner password is: %s' % self.slaprunner_password)
def _connectToSlaprunner(self, resource, data=None): def _connectToSlaprunner(self, resource, data=None):
""" """
Utility. Utility.
...@@ -84,23 +81,92 @@ class SlaprunnerTestSuite(ResiliencyTestSuite): ...@@ -84,23 +81,92 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
def _login(self): def _login(self):
self.logger.debug('Logging in...') self.logger.debug('Logging in...')
self._connectToSlaprunner('doLogin', data='clogin=%s&cpwd=%s' % (self.slaprunner_user, self.slaprunner_password)) self._connectToSlaprunner('doLogin', data='clogin=%s&cpwd=%s' % (
self.slaprunner_user,
self.slaprunner_password)
)
def _retrieveInstanceLogFile(self): def _retrieveInstanceLogFile(self):
""" """
Store the logfile (=data) of the instance, check it is not empty nor it is html. Store the logfile (=data) of the instance, check it is not empty nor it is
html.
""" """
data = self._connectToSlaprunner(resource='fileBrowser', data='opt=9&filename=log.log&dir=instance_root%252Fslappart0%252Fvar%252Flog%252F') data = self._connectToSlaprunner(
resource='fileBrowser',
data='opt=9&filename=log.log&dir=instance_root%252Fslappart0%252Fvar%252Flog%252F'
)
self.logger.info('Retrieved data are:\n%s' % data) self.logger.info('Retrieved data are:\n%s' % data)
if data.find('<') is not -1: if data.find('<') is not -1:
raise IOError('Could not retrieve logfile content: retrieved content is html.') raise IOError(
'Could not retrieve logfile content: retrieved content is html.'
)
if data.find('Could not load') is not -1: if data.find('Could not load') is not -1:
raise IOError('Could not retrieve logfile content: server could not load the file.') raise IOError(
'Could not retrieve logfile content: server could not load the file.'
)
if data.find('Hello') is -1: if data.find('Hello') is -1:
raise IOError('Could not retrieve logfile content: retrieve content does not match "Hello".') raise IOError(
'Could not retrieve logfile content: retrieve content does not match "Hello".'
)
return data return data
def _waitForSoftwareBuild(self):
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"software": true') is not -1:
self.logger.info('Software release is still building. Sleeping...')
time.sleep(15)
self.logger.info('Software Release has been built / is no longer building.')
def _buildSoftwareRelease(self):
self.logger.info('Building the Software Release...')
try:
self._connectToSlaprunner(resource='runSoftwareProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before software release is finished.
pass
self._waitForSoftwareBuild()
def _deployInstance(self):
self.logger.info('Deploying instance...')
try:
self._connectToSlaprunner(resource='runInstanceProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before someftware release is finished.
pass
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"instance": true') is not -1:
self.logger.info('Buildout is still running. Sleeping...')
time.sleep(15)
self.logger.info('Instance has been deployed.')
def _gitClone(self):
self.logger.debug('Doing git clone of git.erp5.org/repos/slapos.git...')
try:
self._connectToSlaprunner(
resource='cloneRepository',
data='repo=http://git.erp5.org/repos/slapos.git&name=workspace/slapos&email=slapos@slapos.org&user=slapos'
)
except (NotHttpOkException, urllib2.HTTPError):
# cloning can be very long.
# XXX: quite dirty way to check.
while self._connectToSlaprunner('getProjectStatus', data='project=workspace/slapos').find('On branch master') is -1:
self.logger.info('git-cloning ongoing, sleeping...')
def _openSoftwareRelease(self, software_name):
self.logger.debug('Opening %s software release...' % software_name)
self._connectToSlaprunner(
resource='setCurrentProject',
data='path=workspace/slapos/software/%s/' % software_name
)
def generateData(self):
self.slaprunner_password = ''.join(
random.SystemRandom().sample(string.ascii_lowercase, 8)
)
self.slaprunner_user = 'slapos'
self.logger.info('Generated slaprunner user is: %s' % self.slaprunner_user)
self.logger.info('Generated slaprunner password is: %s' % self.slaprunner_password)
def pushDataOnMainInstance(self): def pushDataOnMainInstance(self):
""" """
Create a dummy Software Release, Create a dummy Software Release,
...@@ -117,47 +183,26 @@ class SlaprunnerTestSuite(ResiliencyTestSuite): ...@@ -117,47 +183,26 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
slaprunner_recovery_code = parameter_dict['password_recovery_code'] slaprunner_recovery_code = parameter_dict['password_recovery_code']
self.logger.debug('Creating the slaprunner account...') self.logger.debug('Creating the slaprunner account...')
self._connectToSlaprunner(resource='configAccount', data='name=slapos&username=%s&email=slapos@slapos.org&password=%s&rcode=%s' % (self.slaprunner_user, self.slaprunner_password, slaprunner_recovery_code)) self._connectToSlaprunner(
resource='configAccount',
data='name=slapos&username=%s&email=slapos@slapos.org&password=%s&rcode=%s' % (
self.slaprunner_user,
self.slaprunner_password,
slaprunner_recovery_code
)
)
self._login() self._login()
self.logger.debug('Opening hello-world software release from git...') self._gitClone()
try:
self._connectToSlaprunner(resource='cloneRepository', data='repo=http://git.erp5.org/repos/slapos.git&name=workspace/slapos&email=slapos@slapos.org&user=slapos')
except (NotHttpOkException, urllib2.HTTPError):
# cloning can be very long.
# XXX: quite dirty way to check.
while self._connectToSlaprunner('getProjectStatus', data='project=workspace/slapos').find('On branch master') is -1:
self.logger.info('git-cloning ongoing, sleeping...')
# XXX should be taken from parameter. # XXX should be taken from parameter.
self._connectToSlaprunner(resource='setCurrentProject', data='path=workspace/slapos/software/helloworld/') self._openSoftwareRelease('helloworld')
self.logger.info('Building the Software Release...') self._buildSoftwareRelease()
try: self._deployInstance()
self._connectToSlaprunner(resource='runSoftwareProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before software release is finished.
pass
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"software": true') is not -1:
self.logger.info('Buildout is still running. Sleeping...')
time.sleep(15)
self.logger.info('Software Release has been built.')
self.logger.info('Deploying instance...')
try:
self._connectToSlaprunner(resource='runInstanceProfile')
except (NotHttpOkException, urllib2.HTTPError):
# The nginx frontend might timeout before someftware release is finished.
pass
while self._connectToSlaprunner(resource='slapgridResult', data='position=0&log=').find('"instance": true') is not -1:
self.logger.info('Buildout is still running. Sleeping...')
time.sleep(15)
self.logger.info('Instance has been deployed.')
self.data = self._retrieveInstanceLogFile() self.data = self._retrieveInstanceLogFile()
def checkDataOnCloneInstance(self): def checkDataOnCloneInstance(self):
""" """
Check that: Check that:
...@@ -173,6 +218,10 @@ class SlaprunnerTestSuite(ResiliencyTestSuite): ...@@ -173,6 +218,10 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
old_parameter_value=old_slaprunner_backend_url old_parameter_value=old_slaprunner_backend_url
) )
self._login() self._login()
self._waitForSoftwareBuild()
# XXX: in theory, it should be done automatically by slaprunner.
# In practice, it is still too dangerous for ERP5 instances.
self._deployInstance()
new_data = self._retrieveInstanceLogFile() new_data = self._retrieveInstanceLogFile()
if new_data == self.data: if new_data == self.data:
......
...@@ -21,8 +21,7 @@ class Parser(OptionParser): ...@@ -21,8 +21,7 @@ class Parser(OptionParser):
""" """
Initialize all possible options. Initialize all possible options.
""" """
OptionParser.__init__(self, usage=usage, version=version, option_list = [
option_list=[
Option("-l", "--log_file", Option("-l", "--log_file",
help="The path to the log file used by the script.", help="The path to the log file used by the script.",
type=str), type=str),
...@@ -38,7 +37,10 @@ class Parser(OptionParser): ...@@ -38,7 +37,10 @@ class Parser(OptionParser):
default=False, default=False,
action="store_true", action="store_true",
help="Debug mode."), help="Debug mode."),
]) ]
OptionParser.__init__(self, usage=usage, version=version,
option_list=option_list)
def check_args(self): def check_args(self):
""" """
...@@ -50,6 +52,7 @@ class Parser(OptionParser): ...@@ -50,6 +52,7 @@ class Parser(OptionParser):
return options, args[0] return options, args[0]
class Config: class Config:
def __init__(self): def __init__(self):
self.configuration_file_path = None self.configuration_file_path = None
...@@ -125,6 +128,7 @@ def run(): ...@@ -125,6 +128,7 @@ def run():
sys.exit(return_code) sys.exit(return_code)
def serve(config): def serve(config):
from views import app from views import app
from werkzeug.contrib.fixers import ProxyFix from werkzeug.contrib.fixers import ProxyFix
...@@ -134,7 +138,7 @@ def serve(config): ...@@ -134,7 +138,7 @@ def serve(config):
app.config.update( app.config.update(
software_log=config.software_root.rstrip('/') + '.log', software_log=config.software_root.rstrip('/') + '.log',
instance_log=config.instance_root.rstrip('/') + '.log', instance_log=config.instance_root.rstrip('/') + '.log',
workspace = workdir, workspace=workdir,
software_link=software_link, software_link=software_link,
instance_profile='instance.cfg', instance_profile='instance.cfg',
software_profile='software.cfg', software_profile='software.cfg',
......
...@@ -11,4 +11,3 @@ def as_json(f): ...@@ -11,4 +11,3 @@ def as_json(f):
def inner(*args, **kwargs): def inner(*args, **kwargs):
return Response(json.dumps(f(*args, **kwargs)), mimetype='application/json') return Response(json.dumps(f(*args, **kwargs)), mimetype='application/json')
return inner return inner
...@@ -15,7 +15,7 @@ from slapos.runner.utils import realpath, tail, isText ...@@ -15,7 +15,7 @@ from slapos.runner.utils import realpath, tail, isText
class FileBrowser(object): class FileBrowser(object):
"""This class contain all bases function for file browser""" """This class contains all base functions for file browser"""
def __init__(self, config): def __init__(self, config):
self.config = config self.config = config
...@@ -31,19 +31,19 @@ class FileBrowser(object): ...@@ -31,19 +31,19 @@ class FileBrowser(object):
html = 'var gsdirs = [], gsfiles = [];' html = 'var gsdirs = [], gsfiles = [];'
dir = urllib.unquote(dir) dir = urllib.unquote(dir)
# 'dir' is used below. XXX should not shadow a builtin name # XXX-Marco 'dir' and 'all' should not shadow builtin names
realdir = realpath(self.config, dir) realdir = realpath(self.config, dir)
if not realdir: if not realdir:
raise NameError('Could not load directory %s: Permission denied' % dir) raise NameError('Could not load directory %s: Permission denied' % dir)
ldir = sorted(os.listdir(realdir), key=str.lower) ldir = sorted(os.listdir(realdir), key=str.lower)
for f in ldir: for f in ldir:
if f.startswith('.') and not all: #do not display this file/folder if f.startswith('.') and not all: # do not display this file/folder
continue continue
ff = os.path.join(dir, f) ff = os.path.join(dir, f)
realfile = os.path.join(realdir, f) realfile = os.path.join(realdir, f)
mdate = datetime.datetime.fromtimestamp(os.path.getmtime(realfile) mdate = datetime.datetime.fromtimestamp(os.path.getmtime(realfile)
).strftime("%Y-%d-%m %I:%M") ).strftime("%Y-%d-%m %I:%M")
md5sum = md5.md5(realfile).hexdigest() md5sum = md5.md5(realfile).hexdigest()
if not os.path.isdir(realfile): if not os.path.isdir(realfile):
size = os.path.getsize(realfile) size = os.path.getsize(realfile)
...@@ -53,25 +53,20 @@ class FileBrowser(object): ...@@ -53,25 +53,20 @@ class FileBrowser(object):
ext = "unknow" ext = "unknow"
else: else:
ext = str.lower(ext) ext = str.lower(ext)
html += 'gsfiles.push(new gsItem("1", "' + f + '", "' + \ html += 'gsfiles.push(new gsItem("1", "%s", "%s", "%s", "%s", "%s", "%s"));' % (f, ff, size, md5sum, ext, mdate)
ff + '", "' + str(size) + '", "' + md5sum + \
'", "' + ext + '", "' + mdate + '"));'
else: else:
html += 'gsdirs.push(new gsItem("2", "' + f + '", "' + \ html += 'gsdirs.push(new gsItem("2", "%s", "%s", "0", "%s", "dir", "%s"));' % (f, ff, md5sum, mdate)
ff + '", "0", "' + md5sum + '", "dir", "' + mdate + '"));'
return html return html
def makeDirectory(self, dir, filename): def makeDirectory(self, dir, filename):
"""Create a directory""" """Create a directory"""
realdir = self._realdir(dir) realdir = self._realdir(dir)
folder = os.path.join(realdir, filename) folder = os.path.join(realdir, filename)
if not os.path.exists(folder): if not os.path.exists(folder):
os.mkdir(folder, 0744) os.mkdir(folder, 0744)
return '{result: \'1\'}' return "{result: '1'}"
else: else:
return '{result: \'0\'}' return "{result: '0'}"
def makeFile(self, dir, filename): def makeFile(self, dir, filename):
"""Create a file in a directory dir taken""" """Create a file in a directory dir taken"""
...@@ -79,36 +74,39 @@ class FileBrowser(object): ...@@ -79,36 +74,39 @@ class FileBrowser(object):
fout = os.path.join(realdir, filename) fout = os.path.join(realdir, filename)
if not os.path.exists(fout): if not os.path.exists(fout):
open(fout, 'w') open(fout, 'w')
return 'var responce = {result: \'1\'}' return "var responce = {result: '1'}"
else: else:
return '{result: \'0\'}' return "{result: '0'}"
def deleteItem(self, dir, files): def deleteItem(self, dir, files):
"""Delete a list of files or directories""" """Delete a list of files or directories"""
# XXX-Marco do not shadow 'dir'
realdir = self._realdir(dir) realdir = self._realdir(dir)
lfiles = urllib.unquote(files).split(',,,') lfiles = urllib.unquote(files).split(',,,')
try: try:
# XXX-Marco do not shadow 'file'
for file in lfiles: for file in lfiles:
file = os.path.join(realdir, file) file = os.path.join(realdir, file)
if not os.path.exists(file): if not os.path.exists(file):
continue #silent skip file.... continue # silent skip file....
details = file.split('/') details = file.split('/')
last = details[-1] last = details[-1]
if last and last.startswith('.'): if last and last.startswith('.'):
continue #cannot delete this file/directory, to prevent security continue # cannot delete this file/directory, to prevent security
if os.path.isdir(file): if os.path.isdir(file):
shutil.rmtree(file) shutil.rmtree(file)
else: else:
os.unlink(file) os.unlink(file)
except Exception as e: except Exception as e:
return str(e) return str(e)
return '{result: \'1\'}' return "{result: '1'}"
def copyItem(self, dir, files, del_source=False): def copyItem(self, dir, files, del_source=False):
"""Copy a list of files or directory to dir""" """Copy a list of files or directory to dir"""
realdir = self._realdir(dir) realdir = self._realdir(dir)
lfiles = urllib.unquote(files).split(',,,') lfiles = urllib.unquote(files).split(',,,')
try: try:
# XXX-Marco do not shadow 'file'
for file in lfiles: for file in lfiles:
realfile = realpath(self.config, file) realfile = realpath(self.config, file)
if not realfile: if not realfile:
...@@ -117,7 +115,7 @@ class FileBrowser(object): ...@@ -117,7 +115,7 @@ class FileBrowser(object):
details = realfile.split('/') details = realfile.split('/')
dest = os.path.join(realdir, details[-1]) dest = os.path.join(realdir, details[-1])
if os.path.exists(dest): if os.path.exists(dest):
raise NameError('NOT ALLOWED OPERATION : File or directory already exist') raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
if os.path.isdir(realfile): if os.path.isdir(realfile):
shutil.copytree(realfile, dest) shutil.copytree(realfile, dest)
if del_source: if del_source:
...@@ -128,7 +126,7 @@ class FileBrowser(object): ...@@ -128,7 +126,7 @@ class FileBrowser(object):
os.unlink(realfile) os.unlink(realfile)
except Exception as e: except Exception as e:
return str(e) return str(e)
return '{result: \'1\'}' return "{result: '1'}"
def rename(self, dir, filename, newfilename): def rename(self, dir, filename, newfilename):
"""Rename file or directory to dir/filename""" """Rename file or directory to dir/filename"""
...@@ -139,8 +137,8 @@ class FileBrowser(object): ...@@ -139,8 +137,8 @@ class FileBrowser(object):
tofile = os.path.join(realdir, newfilename) tofile = os.path.join(realdir, newfilename)
if not os.path.exists(tofile): if not os.path.exists(tofile):
os.rename(realfile, tofile) os.rename(realfile, tofile)
return '{result: \'1\'}' return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exist') raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def copyAsFile(self, dir, filename, newfilename): def copyAsFile(self, dir, filename, newfilename):
"""Copy file or directory to dir/filename""" """Copy file or directory to dir/filename"""
...@@ -148,21 +146,21 @@ class FileBrowser(object): ...@@ -148,21 +146,21 @@ class FileBrowser(object):
fromfile = os.path.join(realdir, filename) fromfile = os.path.join(realdir, filename)
tofile = os.path.join(realdir, newfilename) tofile = os.path.join(realdir, newfilename)
if not os.path.exists(fromfile): if not os.path.exists(fromfile):
raise NameError('NOT ALLOWED OPERATION : File or directory not exist') raise NameError('NOT ALLOWED OPERATION : File or directory does not exist')
if not os.path.exists(tofile): if not os.path.exists(tofile):
shutil.copy(fromfile, tofile) shutil.copy(fromfile, tofile)
return '{result: \'1\'}' return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exist') raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def uploadFile(self, dir, files): def uploadFile(self, dir, files):
"""Upload a list of file in directory dir""" """Upload a list of files in directory dir"""
realdir = self._realdir(dir) realdir = self._realdir(dir)
for file in files: for file in files:
if files[file]: if files[file]:
filename = werkzeug.secure_filename(files[file].filename) filename = werkzeug.secure_filename(files[file].filename)
if not os.path.exists(os.path.join(dir, filename)): if not os.path.exists(os.path.join(dir, filename)):
files[file].save(os.path.join(realdir, filename)) files[file].save(os.path.join(realdir, filename))
return '{result: \'1\'}' return "{result: '1'}"
def downloadFile(self, dir, filename): def downloadFile(self, dir, filename):
"""Download file dir/filename""" """Download file dir/filename"""
...@@ -179,7 +177,7 @@ class FileBrowser(object): ...@@ -179,7 +177,7 @@ class FileBrowser(object):
tozip = os.path.join(realdir, newfilename) tozip = os.path.join(realdir, newfilename)
fromzip = os.path.join(realdir, filename) fromzip = os.path.join(realdir, filename)
if not os.path.exists(fromzip): if not os.path.exists(fromzip):
raise NameError('NOT ALLOWED OPERATION : File or directory not exist') raise NameError('NOT ALLOWED OPERATION : File or directory does not exist')
if not os.path.exists(tozip): if not os.path.exists(tozip):
zip = zipfile.ZipFile(tozip, 'w', zipfile.ZIP_DEFLATED) zip = zipfile.ZipFile(tozip, 'w', zipfile.ZIP_DEFLATED)
if os.path.isdir(fromzip): if os.path.isdir(fromzip):
...@@ -191,8 +189,8 @@ class FileBrowser(object): ...@@ -191,8 +189,8 @@ class FileBrowser(object):
else: else:
zip.write(fromzip) zip.write(fromzip)
zip.close() zip.close()
return '{result: \'1\'}' return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exist') raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def unzipFile(self, dir, filename, newfilename): def unzipFile(self, dir, filename, newfilename):
"""Extract a zipped archive""" """Extract a zipped archive"""
...@@ -200,7 +198,7 @@ class FileBrowser(object): ...@@ -200,7 +198,7 @@ class FileBrowser(object):
target = os.path.join(realdir, newfilename) target = os.path.join(realdir, newfilename)
archive = os.path.join(realdir, filename) archive = os.path.join(realdir, filename)
if not os.path.exists(archive): if not os.path.exists(archive):
raise NameError('NOT ALLOWED OPERATION : File or directory not exist') raise NameError('NOT ALLOWED OPERATION : File or directory does not exist')
if not os.path.exists(target): if not os.path.exists(target):
zip = zipfile.ZipFile(archive) zip = zipfile.ZipFile(archive)
#member = zip.namelist() #member = zip.namelist()
...@@ -209,8 +207,8 @@ class FileBrowser(object): ...@@ -209,8 +207,8 @@ class FileBrowser(object):
# zip.extractall(target) # zip.extractall(target)
#else: #else:
# zip.extract(member[0], newfilename) # zip.extract(member[0], newfilename)
return '{result: \'1\'}' return "{result: '1'}"
raise NameError('NOT ALLOWED OPERATION : File or directory already exist') raise NameError('NOT ALLOWED OPERATION : File or directory already exists')
def readFile(self, dir, filename, truncate=False): def readFile(self, dir, filename, truncate=False):
"""Read file dir/filename and return content""" """Read file dir/filename and return content"""
...@@ -221,6 +219,6 @@ class FileBrowser(object): ...@@ -221,6 +219,6 @@ class FileBrowser(object):
if not isText(realfile): if not isText(realfile):
return "FILE ERROR: Cannot display binary file, please open a text file only!" return "FILE ERROR: Cannot display binary file, please open a text file only!"
if not truncate: if not truncate:
return open(realfile, 'r').read() return open(realfile).read()
else: else:
return tail(open(realfile, 'r'), 0) return tail(open(realfile), 0)
...@@ -12,12 +12,12 @@ from flask import jsonify ...@@ -12,12 +12,12 @@ from flask import jsonify
def cloneRepo(data): def cloneRepo(data):
"""Clonne a repository """Clone a repository
Args: Args:
data: a dictionnary of parameters to use: data: a dictionary of parameters to use:
data['path'] is the path of the new project data['path'] is the path of the new project
data['repo'] is the url of the repository to be cloned data['repo'] is the url of the repository to be cloned
data['email'] is the user email data['email'] is the user's email
data['user'] is the name of the user data['user'] is the name of the user
Returns: Returns:
a jsonify data""" a jsonify data"""
...@@ -29,7 +29,7 @@ def cloneRepo(data): ...@@ -29,7 +29,7 @@ def cloneRepo(data):
json = "" json = ""
try: try:
if os.path.exists(workDir) and len(os.listdir(workDir)) < 2: if os.path.exists(workDir) and len(os.listdir(workDir)) < 2:
shutil.rmtree(workDir) #delete useless files shutil.rmtree(workDir) # delete useless files
repo = Repo.clone_from(data["repo"], workDir) repo = Repo.clone_from(data["repo"], workDir)
config_writer = repo.config_writer() config_writer = repo.config_writer()
config_writer.add_section("user") config_writer.add_section("user")
...@@ -42,10 +42,11 @@ def cloneRepo(data): ...@@ -42,10 +42,11 @@ def cloneRepo(data):
json = safeResult(str(e)) json = safeResult(str(e))
return jsonify(code=code, result=json) return jsonify(code=code, result=json)
def gitStatus(project): def gitStatus(project):
"""Run git status and return status of specified project folder """Run git status and return status of specified project folder
Args: Args:
project: path of the projet ti get status project: path of the projet to get status
Returns: Returns:
a parsed string that contains the result of git status""" a parsed string that contains the result of git status"""
code = 0 code = 0
...@@ -61,6 +62,7 @@ def gitStatus(project): ...@@ -61,6 +62,7 @@ def gitStatus(project):
json = safeResult(str(e)) json = safeResult(str(e))
return jsonify(code=code, result=json, branch=branch, dirty=isdirty) return jsonify(code=code, result=json, branch=branch, dirty=isdirty)
def switchBranch(project, name): def switchBranch(project, name):
"""Switch a git branch """Switch a git branch
Args: Args:
...@@ -76,13 +78,14 @@ def switchBranch(project, name): ...@@ -76,13 +78,14 @@ def switchBranch(project, name):
if name == current_branch: if name == current_branch:
json = "This is already your active branch for this project" json = "This is already your active branch for this project"
else: else:
git = repo.git git = repo.git
git.checkout(name) git.checkout(name)
code = 1 code = 1
except Exception as e: except Exception as e:
json = safeResult(str(e)) json = safeResult(str(e))
return jsonify(code=code, result=json) return jsonify(code=code, result=json)
def addBranch(project, name, onlyCheckout=False): def addBranch(project, name, onlyCheckout=False):
"""Add new git branch to the repository """Add new git branch to the repository
Args: Args:
...@@ -95,7 +98,7 @@ def addBranch(project, name, onlyCheckout=False): ...@@ -95,7 +98,7 @@ def addBranch(project, name, onlyCheckout=False):
json = "" json = ""
try: try:
repo = Repo(project) repo = Repo(project)
git = repo.git git = repo.git
if not onlyCheckout: if not onlyCheckout:
git.checkout('-b', name) git.checkout('-b', name)
else: else:
...@@ -105,6 +108,7 @@ def addBranch(project, name, onlyCheckout=False): ...@@ -105,6 +108,7 @@ def addBranch(project, name, onlyCheckout=False):
json = safeResult(str(e)) json = safeResult(str(e))
return jsonify(code=code, result=json) return jsonify(code=code, result=json)
def getDiff(project): def getDiff(project):
"""Get git diff for the specified project directory""" """Get git diff for the specified project directory"""
result = "" result = ""
...@@ -117,44 +121,40 @@ def getDiff(project): ...@@ -117,44 +121,40 @@ def getDiff(project):
result = safeResult(str(e)) result = safeResult(str(e))
return result return result
def gitCommit(project, msg):
"""Commit changes for the specified repository
Args:
project: directory of the local repository
msg: commit message"""
code = 1
json = ""
repo = Repo(project)
if repo.is_dirty:
git = repo.git
#add file to be commited
files = repo.untracked_files
for f in files:
git.add(f)
#Commit all modified and untracked files
git.commit('-a', '-m', msg)
else:
json = "Nothing to be commited"
return jsonify(code=code, result=json)
def gitPush(project): def gitPush(project, msg):
"""Push changes for the specified repository """Commit and Push changes for the specified repository
Args: Args:
project: directory of the local repository project: directory of the local repository
msg: commit message""" msg: commit message"""
code = 0 code = 0
json = "" json = ""
repo = Repo(project) undo_commit = False
try: try:
git = repo.git repo = Repo(project)
#push changes to repo if repo.is_dirty:
current_branch = repo.active_branch.name git = repo.git
git.push('origin', current_branch) current_branch = repo.active_branch.name
code = 1 #add file to be commited
files = repo.untracked_files
for f in files:
git.add(f)
#Commit all modified and untracked files
git.commit('-a', '-m', msg)
undo_commit = True
#push changes to repo
git.push('origin', current_branch)
code = 1
else:
json = "Nothing to commit"
code = 1
except Exception as e: except Exception as e:
if undo_commit:
git.reset("HEAD~") # undo previous commit
json = safeResult(str(e)) json = safeResult(str(e))
return jsonify(code=code, result=json) return jsonify(code=code, result=json)
def gitPull(project): def gitPull(project):
result = "" result = ""
code = 0 code = 0
...@@ -167,6 +167,7 @@ def gitPull(project): ...@@ -167,6 +167,7 @@ def gitPull(project):
result = safeResult(str(e)) result = safeResult(str(e))
return jsonify(code=code, result=result) return jsonify(code=code, result=result)
def safeResult(result): def safeResult(result):
"""Parse string and remove credential of the user""" """Parse string and remove credential of the user"""
regex = re.compile("(https:\/\/)([\w\d\._-]+:[\w\d\._-]+)\@([\S]+\s)", re.VERBOSE) regex = re.compile("(https:\/\/)([\w\d\._-]+:[\w\d\._-]+)\@([\S]+\s)", re.VERBOSE)
......
...@@ -9,7 +9,7 @@ SLAPRUNNER_PROCESS_LIST = [] ...@@ -9,7 +9,7 @@ SLAPRUNNER_PROCESS_LIST = []
class Popen(subprocess.Popen): class Popen(subprocess.Popen):
""" """
Extension of Popen to launch and kill process in a clean way Extension of Popen to launch and kill processes in a clean way
""" """
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
""" """
...@@ -28,7 +28,7 @@ class Popen(subprocess.Popen): ...@@ -28,7 +28,7 @@ class Popen(subprocess.Popen):
def kill(self, sig=signal.SIGTERM, recursive=False): def kill(self, sig=signal.SIGTERM, recursive=False):
""" """
Kill process and all its descendant if recursive Kill process and all its descendants if recursive
""" """
if self.poll() is None: if self.poll() is None:
if recursive: if recursive:
...@@ -83,7 +83,7 @@ def isRunning(name): ...@@ -83,7 +83,7 @@ def isRunning(name):
def killRunningProcess(name, recursive=False): def killRunningProcess(name, recursive=False):
""" """
Kill all process with name Kill all processes with a given name
""" """
for process in SLAPRUNNER_PROCESS_LIST: for process in SLAPRUNNER_PROCESS_LIST:
if process.name == name: if process.name == name:
...@@ -92,7 +92,7 @@ def killRunningProcess(name, recursive=False): ...@@ -92,7 +92,7 @@ def killRunningProcess(name, recursive=False):
def handler(sig, frame): def handler(sig, frame):
""" """
Signal handler to kill all process Signal handler to kill all processes
""" """
pid = os.getpid() pid = os.getpid()
os.kill(-pid, sig) os.kill(-pid, sig)
......
This diff is collapsed.
...@@ -37,8 +37,8 @@ function deleteCookie(name, path, domain) { ...@@ -37,8 +37,8 @@ function deleteCookie(name, path, domain) {
function setCookie(name, value, expires, path, domain, secure) { function setCookie(name, value, expires, path, domain, secure) {
"use strict"; "use strict";
if (getCookie(name) !== null){ if (getCookie(name) !== null) {
deleteCookie(name); deleteCookie(name);
} }
if (!expires) { if (!expires) {
var today = new Date(); var today = new Date();
......
...@@ -32,8 +32,14 @@ $(document).ready(function () { ...@@ -32,8 +32,14 @@ $(document).ready(function () {
$("#error").Popup(data.result, {type: 'alert', duration: 3000}); $("#error").Popup(data.result, {type: 'alert', duration: 3000});
} }
}) })
.error(function () { .error(function (response) {
$("#error").Popup("Cannot send your account identifier please try again!!", if (response && response.status === 401) {
$("#error").Popup('Login and/or password is incorrect.',
{type: 'alert', duration: 3000}
);
return
}
$("#error").Popup("Cannot send your account identifier",
{type: 'alert', duration: 3000}); {type: 'alert', duration: 3000});
}) })
.complete(function () { .complete(function () {
......
...@@ -44,24 +44,24 @@ function clearAll(setStop) { ...@@ -44,24 +44,24 @@ function clearAll(setStop) {
running = setStop; running = setStop;
} }
function removeFirstLog(){ function removeFirstLog() {
"use strict"; "use strict";
currentLogSize -= parseInt($("#salpgridLog p:first-child").attr('rel'), 10); currentLogSize -= parseInt($("#salpgridLog p:first-child").attr('rel'), 10);
$("#salpgridLog p:first-child").remove(); $("#salpgridLog p:first-child").remove();
} }
function getRunningState() { function getRunningState() {
"use strict"; "use strict";
var size = 0; var size = 0,
var log_info = ""; log_info = "",
var param = { param = {
position: logReadingPosition, position: logReadingPosition,
log: (processState !== "Checking" && openedlogpage === processType.toLowerCase()) ? openedlogpage : "" log: (processState !== "Checking" && openedlogpage === processType.toLowerCase()) ? openedlogpage : ""
}, },
jqxhr = $.post(url, param, function (data) { jqxhr = $.post(url, param, function (data) {
setRunningState(data); setRunningState(data);
size = data.content.position - logReadingPosition; size = data.content.position - logReadingPosition;
if (logReadingPosition !== 0 && data.content.truncated){ if (logReadingPosition !== 0 && data.content.truncated) {
log_info = "<p class='info' rel='0'>SLAPRUNNER INFO: SLAPGRID-LOG HAS BEEN TRUNCATED HERE. To see full log reload your log page</p>"; log_info = "<p class='info' rel='0'>SLAPRUNNER INFO: SLAPGRID-LOG HAS BEEN TRUNCATED HERE. To see full log reload your log page</p>";
} }
logReadingPosition = data.content.position; logReadingPosition = data.content.position;
...@@ -78,18 +78,16 @@ function getRunningState() { ...@@ -78,18 +78,16 @@ function getRunningState() {
} }
processState = running ? "Running" : "Stopped"; processState = running ? "Running" : "Stopped";
currentLogSize += parseInt(size, 10); currentLogSize += parseInt(size, 10);
if (currentLogSize > maxLogSize){ if (currentLogSize > maxLogSize) {
//Remove the first element into log div //Remove the first element into log div
removeFirstLog(); removeFirstLog();
if (currentLogSize > maxLogSize){ if (currentLogSize > maxLogSize) {
removeFirstLog(); //in cas of previous <p/> size is 0 removeFirstLog(); //in cas of previous <p/> size is 0
} }
} }
}) }).error(function () {
.error(function () {
clearAll(false); clearAll(false);
}) }).complete(function () {
.complete(function () {
if (running) { if (running) {
setTimeout(function () { setTimeout(function () {
getRunningState(); getRunningState();
......
...@@ -19,11 +19,10 @@ $(document).ready(function () { ...@@ -19,11 +19,10 @@ $(document).ready(function () {
send = false, send = false,
edit = false, edit = false,
selection = "", selection = "",
edit_status = ""; edit_status = "",
base_path = function () {
var base_path = function() { return softwareDisplay ? projectDir : currentProject;
return softwareDisplay ? projectDir : currentProject; };
}
function setEditMode(file) { function setEditMode(file) {
var i, var i,
......
...@@ -6,5 +6,5 @@ ...@@ -6,5 +6,5 @@
$(document).ready(function () { $(document).ready(function () {
"use strict"; "use strict";
$('#fileNavigator').gsFileManager({script: $SCRIPT_ROOT + "/fileBrowser", root:'workspace/'}); $('#fileNavigator').gsFileManager({script: $SCRIPT_ROOT + "/fileBrowser", root: 'workspace/'});
}); });
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment