Commit 049c42b1 authored by Bryton Lacquement, committed by Julien Muchembled

fixup! Add support for Python 3

parent 31532fac
......@@ -345,8 +345,8 @@ def main():
# Select an unused computer to run the test.
group = test_mapping.getNextGroup(
ignore_list = [group for _, _, group in \
running_test_dict.itervalues()])
ignore_list = [group for _, _, group in
six.itervalues(running_test_dict)])
# Select a test
test_line = test_result.start(
......@@ -455,7 +455,7 @@ def main():
logger.info('Sleeping %is...', to_sleep)
time.sleep(to_sleep)
if not test_result.isAlive():
for _, tester, computer_id in running_test_dict.itervalues():
for _, tester, computer_id in six.itervalues(running_test_dict):
tester.teardown()
time.sleep(300)
......
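
The first hunks swap the Python-2-only running_test_dict.itervalues() for six.itervalues(), which yields the values lazily on both major versions. A minimal sketch of the pattern, with made-up dictionary contents standing in for the real running_test_dict:

    import six

    # hypothetical contents; the real dict maps test names to (line, tester, group) tuples
    running_test_dict = {'test-a': (1, 'tester-1', 'COMP-1'),
                         'test-b': (2, 'tester-2', 'COMP-2')}

    # six.itervalues() calls dict.itervalues() on Python 2 and dict.values() on Python 3
    ignore_list = [group for _, _, group in six.itervalues(running_test_dict)]
    assert sorted(ignore_list) == ['COMP-1', 'COMP-2']
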
# -*- coding: utf-8 -*-
from __future__ import print_function
from six.moves import configparser
import argparse
from six.moves import dbm_gnu as gdbm
......@@ -27,7 +28,7 @@ def main():
if args.pid is not None:
pid_filename = args.pid[0]
if os.path.exists(pid_filename):
print >> sys.stderr, "Already running"
print("Already running", file=sys.stderr)
return 127
with open(pid_filename, 'w') as pid_file:
pid_file.write(str(os.getpid()))
......
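
In this file the imports move to six.moves so a single spelling covers both interpreters. Roughly, six resolves the two aliases as below (a sketch of the idea, not the actual six source):

    try:
        import configparser                     # Python 3 name
    except ImportError:
        import ConfigParser as configparser     # Python 2 name

    try:
        import dbm.gnu as gdbm                  # Python 3 name
    except ImportError:
        import gdbm                             # Python 2 name
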
......@@ -42,6 +42,14 @@ from six.moves import socketserver
import io
import threading
try:
logging_levels = logging._nameToLevel
logging_choices = logging_levels.keys()
except AttributeError:
logging_levels = logging._levelNames
logging_choices = [i for i in logging_levels
if isinstance(i, str)]
# Copied from erp5.util:erp5/util/testnode/ProcessManager.py
def subprocess_capture(p, log, log_prefix, get_output=True):
def readerthread(input, output, buffer):
......@@ -81,9 +89,9 @@ class EqueueServer(socketserver.ThreadingUnixStreamServer):
def __init__(self, *args, **kw):
self.options = kw.pop('equeue_options')
super(EqueueServer, self).__init__(self,
RequestHandlerClass=None,
*args, **kw)
socketserver.ThreadingUnixStreamServer.__init__(self,
RequestHandlerClass=None,
*args, **kw)
# Equeue Specific elements
self.setLogger(self.options.logfile[0], self.options.loglevel[0])
self.setDB(self.options.database[0])
......@@ -99,7 +107,7 @@ class EqueueServer(socketserver.ThreadingUnixStreamServer):
self.logger = logging.getLogger("EQueue")
handler = logging.handlers.WatchedFileHandler(logfile, mode='a')
# Natively support logrotate
level = logging._levelNames.get(loglevel, logging.INFO)
level = logging_levels.get(loglevel, logging.INFO)
self.logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
......@@ -131,7 +139,7 @@ class EqueueServer(socketserver.ThreadingUnixStreamServer):
try:
sys.stdout.flush()
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr=subprocess.PIPE, universal_newlines=True)
subprocess_capture(p, self.logger.info, '', True)
if p.returncode == 0:
self.logger.info("%s finished successfully.", cmd_readable)
......@@ -172,7 +180,7 @@ class EqueueServer(socketserver.ThreadingUnixStreamServer):
try:
request.send(command)
except:
except Exception:
self.logger.warning("Couldn't respond to %r", request.fileno())
self.close_request(request)
self._runCommandIfNeeded(command, timestamp)
......@@ -193,8 +201,7 @@ def main():
"calls are stored")
parser.add_argument('--loglevel', nargs=1,
default='INFO',
choices=[i for i in logging._levelNames
if isinstance(i, str)],
choices=logging_choices,
required=False)
parser.add_argument('-l', '--logfile', nargs=1, required=True,
help="Path to the log file.")
......
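
The try/except added near the top of the equeue server bridges a rename of logging's private level table: Python 2 exposes logging._levelNames (keys are both names and numeric levels), Python 3 exposes logging._nameToLevel (names only). Standalone sketch of the shim and of the later loglevel lookup that uses it:

    import logging

    try:
        logging_levels = logging._nameToLevel              # Python 3
        logging_choices = list(logging_levels)
    except AttributeError:
        logging_levels = logging._levelNames               # Python 2
        # keep only the string keys ('DEBUG', 'INFO', ...) for argparse choices
        logging_choices = [i for i in logging_levels if isinstance(i, str)]

    level = logging_levels.get('DEBUG', logging.INFO)      # numeric level, e.g. 10
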
......@@ -89,19 +89,19 @@ def setup(arguments):
time.sleep(3)
continue
time.sleep(timeout)
if arguments.has_key('delete_target'):
if 'delete_target' in arguments:
delete(arguments)
if arguments.has_key('source'):
if 'source' in arguments:
rename(arguments)
if arguments.has_key('script'):
if 'script' in arguments:
run_script(arguments)
if arguments.has_key('sql_script'):
if 'sql_script' in arguments:
run_sql_script(arguments)
if arguments.has_key('chmod_target'):
if 'chmod_target' in arguments:
chmod(arguments)
return
......
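
dict.has_key() no longer exists on Python 3, so the membership tests above switch to the in operator (already the idiomatic form on Python 2). For instance, with a hypothetical arguments dict:

    arguments = {'delete_target': '/srv/old', 'script': '/srv/migrate.sh'}   # made-up values

    # Python 2 only:  arguments.has_key('script')
    # Python 2 and 3:
    if 'script' in arguments:
        print('would run %s' % arguments['script'])
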
......@@ -33,7 +33,7 @@ def buildStatistic(history_folder):
last_date = None
if stats_dict["data"]:
if stats_dict["data"][-1].has_key("start-date"):
if "start-date" in stats_dict["data"][-1]:
last_date = stats_dict["data"][-1]["start-date"]
else:
last_date = stats_dict["data"][-1]["date"]
......
......@@ -293,17 +293,17 @@ def main():
if process_result and process_result['total_process'] != 0.0:
appendToJsonFile(process_file, ", ".join(
[str(process_result[key]) for key in label_list if process_result.has_key(key)])
str(process_result[key]) for key in label_list if key in process_result)
)
resource_status_dict.update(process_result)
if memory_result and memory_result['memory_rss'] != 0.0:
appendToJsonFile(mem_file, ", ".join(
[str(memory_result[key]) for key in label_list if memory_result.has_key(key)])
str(memory_result[key]) for key in label_list if key in memory_result)
)
resource_status_dict.update(memory_result)
if io_result and io_result['io_rw_counter'] != 0.0:
appendToJsonFile(io_file, ", ".join(
[str(io_result[key]) for key in label_list if io_result.has_key(key)])
str(io_result[key]) for key in label_list if key in io_result)
)
resource_status_dict.update(io_result)
......
......@@ -14,7 +14,7 @@ def get_curl(buffer, url):
result = "OK"
try:
curl.perform()
except:
except Exception:
import traceback
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
......
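
Several hunks also narrow bare except: clauses to except Exception:, which keeps KeyboardInterrupt and SystemExit from being swallowed while still logging any real failure. Sketch of the pattern around the get_curl case, with a dummy function standing in for curl.perform():

    import sys
    import traceback

    def perform():                      # stand-in for curl.perform() in the real script
        raise RuntimeError("connection refused")

    result = "OK"
    try:
        perform()
    # a bare "except:" would also trap KeyboardInterrupt and SystemExit
    except Exception:
        traceback.print_exc(file=sys.stderr)
        sys.stderr.flush()
        result = "FAILED"
    print(result)
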
......@@ -16,7 +16,7 @@ from tzlocal import get_localzone
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# check backup ran OK every 5 minutes
self.setPeriodicity(minute=5)
......
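
The change repeated across the promise plugins replaces the explicit GenericPromise.__init__(self, config) call with super(), which stays correct if the inheritance chain ever gains cooperative __init__ methods. Reduced sketch with a dummy base standing in for GenericPromise:

    class GenericPromise(object):        # stand-in for slapos.grid.promise.generic.GenericPromise
        def __init__(self, config):
            self.config = config

    class RunPromise(GenericPromise):
        def __init__(self, config):
            # Python-2-compatible spelling; on Python 3 alone, super().__init__(config) would do
            super(RunPromise, self).__init__(config)
            self.periodicity = 5         # stands in for self.setPeriodicity(minute=5)

    RunPromise({'frequency': 720})
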
......@@ -8,7 +8,7 @@ import os
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# set periodicity to run the promise twice per day
self.custom_frequency = int(self.getConfig('frequency', 720))
self.setPeriodicity(self.custom_frequency)
......
......@@ -12,7 +12,7 @@ r = re.compile(br"^([0-9]+\-[0-9]+\-[0-9]+ [0-9]+\:[0-9]+\:[0-9]+)(\,[0-9]+) - (
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
self.setPeriodicity(minute=10)
def sense(self):
......
......@@ -6,7 +6,7 @@ from slapos.grid.promise.generic import GenericPromise
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# SR can set custom periodicity
self.setPeriodicity(float(self.getConfig('frequency', 2)))
......@@ -22,7 +22,8 @@ class RunPromise(GenericPromise):
url = self.getConfig('url').strip()
try:
result = open(filename).read()
with open(filename) as f:
result = f.read()
except Exception as e:
self.logger.error(
"ERROR %r during opening and reading file %r" % (e, filename))
......
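
Reading the file through a with block (above) closes the descriptor deterministically even when read() raises, instead of relying on CPython refcounting as the bare open(filename).read() did. Equivalent standalone sketch, with a hypothetical path:

    filename = '/tmp/example-status.txt'             # hypothetical path
    try:
        with open(filename) as f:                    # closed on success and on error
            result = f.read()
    except Exception as e:
        print("ERROR %r during opening and reading file %r" % (e, filename))
    else:
        print("read %d bytes" % len(result))
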
......@@ -16,7 +16,7 @@ from slapos.collect.db import Database
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# check disk space at least every 3 minutes
self.setPeriodicity(minute=3)
......@@ -131,7 +131,7 @@ class RunPromise(GenericPromise):
min_free_size = int(min_size_str)*1024*1024
else:
with open(disk_threshold_file, 'w') as f:
f.write(str(min_free_size/(1024*1024)))
f.write(str(min_free_size//(1024*1024)))
if check_date:
# testing mode
......
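
The / to // change matters because true division on Python 3 would turn min_free_size/(1024*1024) into a float and write e.g. "2048.0" into the threshold file; floor division preserves the old integer result on both versions:

    min_free_size = 2048 * 1024 * 1024

    print(min_free_size / (1024 * 1024))     # 2048 on Python 2, 2048.0 on Python 3
    print(min_free_size // (1024 * 1024))    # 2048 on both
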
......@@ -8,7 +8,7 @@ from slapos.networkbench.ping import ping, ping6
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# set periodicity to run the promise twice per day
self.custom_frequency = int(self.getConfig('frequency', 720))
self.setPeriodicity(self.custom_frequency)
......
......@@ -8,7 +8,7 @@ from datetime import datetime
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
self.setPeriodicity(minute=1)
def sense(self):
......
from zope import interface as zope_interface
from zope.interface import implementer
from slapos.grid.promise import interface
from slapos.grid.promise.generic import GenericPromise
import socket
import sys
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
zope_interface.implements(interface.IPromise)
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# check port is listening at least every 2 minutes
self.setPeriodicity(minute=2)
......@@ -30,9 +28,9 @@ class RunPromise(GenericPromise):
# self.logger.info("port connection OK")
try:
socket.create_connection(addr).close()
except (socket.herror, socket.gaierror), e:
except (socket.herror, socket.gaierror) as e:
self.logger.error("ERROR hostname/port ({}) is not correct: {}".format(addr, e))
except (socket.error, socket.timeout), e:
except (socket.error, socket.timeout) as e:
self.logger.error("ERROR while connecting to {}: {}".format(addr, e))
else:
self.logger.info("port connection OK ({})".format(addr))
......
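
This plugin also drops zope_interface.implements(), a class-advice mechanism that only works on Python 2, in favour of the @implementer class decorator, and moves the exception clauses to the except ... as form. Reduced sketch of the decorator change (IPromise here is a stand-in interface):

    from zope.interface import Interface, implementer

    class IPromise(Interface):               # stand-in for slapos.grid.promise.interface.IPromise
        pass

    # Python 2 only (class advice inside the class body):
    #     class RunPromise(GenericPromise):
    #         zope_interface.implements(IPromise)

    @implementer(IPromise)                   # works on Python 2 and 3
    class RunPromise(object):
        pass
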
......@@ -8,7 +8,7 @@ from slapos.networkbench.ping import ping, ping6
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# set periodicity to run the promise twice per day
self.custom_frequency = int(self.getConfig('frequency', 720))
self.setPeriodicity(self.custom_frequency)
......
......@@ -9,22 +9,23 @@ import os
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# test load every 3 minutes
self.setPeriodicity(minute=3)
def checkCPULoad(self, tolerance=2.2):
# tolerance=1.5 => accept CPU load up to 1.5 =150%
uptime_result = subprocess.check_output(['uptime'], universal_newlines=True)
uptime_result = subprocess.check_output('uptime', universal_newlines=True)
line = uptime_result.strip().split(' ')
load, load5, long_load = line[-3:]
long_load = float(long_load.replace(',', '.'))
core_count = int(subprocess.check_output(['nproc']).strip())
core_count = int(subprocess.check_output('nproc').strip())
max_load = core_count * tolerance
if long_load > max_load:
# display top statistics
top_result = subprocess.check_output(['top', '-n', '1', '-b'])
top_result = subprocess.check_output(('top', '-n', '1', '-b'),
universal_newlines=True)
message = "CPU load is high: %s %s %s\n\n" % (load, load5, long_load)
i = 0
result_list = top_result.split('\n')
......
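
check_output() returns bytes on Python 3 unless universal_newlines=True is passed, so the flag keeps the later string handling of the uptime output working unchanged on both versions. Minimal sketch:

    import subprocess

    # text on both versions thanks to universal_newlines=True
    uptime_result = subprocess.check_output('uptime', universal_newlines=True)
    load, load5, long_load = uptime_result.strip().split(' ')[-3:]
    print(float(long_load.replace(',', '.')))    # 15-minute load average
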
......@@ -8,7 +8,7 @@ import requests
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# SR can set custom periodicity
self.setPeriodicity(float(self.getConfig('frequency', 2)))
......
......@@ -10,7 +10,7 @@ from .util import tail_file
class RunPromise(GenericPromise):
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
self.setPeriodicity(minute=2)
def sense(self):
......
from zope import interface as zope_interface
from zope.interface import implementer
from slapos.grid.promise import interface
from slapos.grid.promise.generic import GenericPromise
......@@ -7,12 +7,10 @@ try:
except ImportError:
import subprocess
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
zope_interface.implements(interface.IPromise)
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
self.setPeriodicity(minute=int(self.getConfig('frequency', 5)))
def sense(self):
......@@ -31,16 +29,18 @@ class RunPromise(GenericPromise):
self.logger.error("Wrapper %r not supported." % (wrapper,))
return
process = subprocess.Popen(
try:
subprocess.check_output(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
result = process.communicate()[0].strip()
if process.returncode == 0:
self.logger.info("OK")
)
except subprocess.CalledProcessError as e:
result = e.output.strip()
self.logger.error(message, result if str is bytes else
result.decode('utf-8', 'replace'))
else:
self.logger.error(message % (result,))
self.logger.info("OK")
def anomaly(self):
"""
......
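
The Popen/communicate/returncode sequence is folded into check_output(), letting CalledProcessError carry the failing output; the `result if str is bytes else result.decode(...)` idiom then logs native text on both versions (the captured output is bytes on Python 3, str on Python 2). Standalone sketch of the failure path, using a hypothetical command that is expected to fail:

    import subprocess

    args = ['ls', '/nonexistent-path']               # hypothetical failing wrapper command
    message = "Command %r failed with output:\n%%s" % (args,)
    try:
        subprocess.check_output(args, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        result = e.output.strip()
        # bytes on Python 3, str on Python 2
        print(message % (result if str is bytes else result.decode('utf-8', 'replace'),))
    else:
        print("OK")
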
from zope import interface as zope_interface
from zope.interface import implementer
from slapos.grid.promise import interface
from slapos.grid.promise.generic import GenericPromise
try:
......@@ -6,12 +6,10 @@ try:
except ImportError:
import subprocess
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
zope_interface.implements(interface.IPromise)
def __init__(self, config):
GenericPromise.__init__(self, config)
super(RunPromise, self).__init__(config)
# check configuration every 5 minutes (only for anomaly)
self.setPeriodicity(minute=int(self.getConfig('frequency', 5)))
......@@ -23,16 +21,14 @@ class RunPromise(GenericPromise):
validate_script = self.getConfig('verification-script')
if not validate_script:
raise ValueError("'verification-script' was not set in promise parameters.")
process = subprocess.Popen(
[validate_script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
message = process.communicate()[0]
if process.returncode == 0:
self.logger.info("OK")
try:
subprocess.check_output(validate_script, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
message = e.output
self.logger.error(message if str is bytes else
message.decode('utf-8', 'replace'))
else:
self.logger.error("%s" % message)
self.logger.info("OK")
def anomaly(self):
return self._anomaly(result_count=1, failure_amount=1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import csv
import datetime
import json
import httplib
from six.moves import http_client as httplib
import os
import shutil
import socket
......@@ -13,8 +14,8 @@ import subprocess
import sys
import time
import traceback
import urllib2
import urlparse
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlparse
import uuid
def createStatusItem(item_directory, instance_name, callback, date, link, status):
......@@ -79,8 +80,7 @@ def main():
saveStatus('STARTED')
if args.max_run <= 0:
print "--max-run argument takes a strictely positive number as argument"
sys.exit(-1)
parser.error("--max-run argument takes a strictly positive number as argument")
while args.max_run > 0:
try:
......@@ -108,7 +108,7 @@ def main():
content.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
))
print content
print(content)
# Write feed safely
error_message = ""
......@@ -128,7 +128,7 @@ def main():
'slapos:%s' % uuid.uuid4(),
])
os.rename(temp_file, args.logfile[0])
except Exception, e:
except Exception as e:
error_message = "ERROR ON WRITING FEED - %s" % str(e)
finally:
try:
......@@ -143,14 +143,14 @@ def main():
if exit_code != 0:
sys.exit(exit_code)
print 'Fetching %s feed...' % args.feed_url[0]
print('Fetching %s feed...' % args.feed_url[0])
feed = urllib2.urlopen(args.feed_url[0])
feed = urlopen(args.feed_url[0])
body = feed.read()
some_notification_failed = False
for notif_url in args.notification_url:
notification_url = urlparse.urlparse(notif_url)
notification_url = urlparse(notif_url)
notification_port = notification_url.port
if notification_port is None:
......
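
The notification script replaces httplib, urllib2 and urlparse with their six.moves counterparts, which expose the Python 3 package layout on both interpreters, and converts the remaining print statements. A rough correspondence (not the actual six source), plus a small usage example:

    # Python 2 name          six.moves spelling
    # httplib            ->  from six.moves import http_client as httplib
    # urllib2.urlopen    ->  from six.moves.urllib.request import urlopen
    # urlparse.urlparse  ->  from six.moves.urllib.parse import urlparse
    # urllib.quote       ->  from six.moves.urllib.parse import quote

    from six.moves.urllib.parse import urlparse, quote

    parts = urlparse('https://notify.example.com:8080/feed')    # hypothetical URL
    print(parts.hostname, parts.port)                            # notify.example.com 8080
    print(quote('<instance>'))                                   # %3Cinstance%3E
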
......@@ -26,6 +26,7 @@
#
##############################################################################
from __future__ import print_function
import argparse
import json
import importlib
......@@ -120,7 +121,7 @@ def runTestSuite(test_suite_title, test_suite_arguments, logger):
parsed_arguments = dict(key.split('=') for key in test_suite_arguments)
test_suite_module = importFrom(test_suite_title)
success = test_suite_module.runTestSuite(**parsed_arguments)
except:
except Exception:
logger.exception('Impossible to run resiliency test:')
success = False
return success
......@@ -228,7 +229,7 @@ def runResiliencyTest():
"""
error_message_set, exit_status = ScalabilityLauncher().run()
for error_message in error_message_set:
print >>sys.stderr, 'ERROR: %s' % error_message
print('ERROR: %s' % error_message, file=sys.stderr)
sys.exit(exit_status)
......@@ -284,7 +285,5 @@ def runUnitTest():
test_count=1,
error_count=error_count,
duration=test_duration)
except:
raise
finally:
os.remove(fname)
......@@ -33,8 +33,9 @@ import random
import ssl
import string
import time
import urllib
import urllib2
from six.moves.urllib.parse import quote
from six.moves.urllib.request import HTTPBasicAuthHandler, HTTPSHandler, \
build_opener
class NotHttpOkException(Exception):
pass
......@@ -50,7 +51,7 @@ class ERP5TestSuite(SlaprunnerTestSuite):
Set inside of slaprunner the instance parameter to use to deploy erp5 instance.
"""
p = '<?xml version="1.0" encoding="utf-8"?> <instance> <parameter id="_">{"zodb-zeo": {"backup-periodicity": "*:1/4"}, "mariadb": {"backup-periodicity": "*:1/4"}}</parameter> </instance>'
parameter = urllib2.quote(p)
parameter = quote(p)
self._connectToSlaprunner(
resource='saveParameterXml',
data='software_type=default&parameter=%s' % parameter)
......@@ -109,7 +110,7 @@ class ERP5TestSuite(SlaprunnerTestSuite):
resource='/saveFileContent',
data='file=runner_workdir%%2Finstance%%2F%s%%2Fetc%%2Fhaproxy.cfg&content=%s' % (
haproxy_slappart,
urllib.quote(file_content),
quote(file_content),
)
)
......@@ -133,12 +134,12 @@ class ERP5TestSuite(SlaprunnerTestSuite):
def _connectToERP5(self, url, data=None, password=None):
if password is None:
password = self._getERP5Password()
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler = HTTPBasicAuthHandler()
auth_handler.add_password(realm='Zope', uri=url, user='zope', passwd=password)
ssl_context = ssl._create_unverified_context()
opener_director = urllib2.build_opener(
opener_director = build_opener(
auth_handler,
urllib2.HTTPSHandler(context=ssl_context)
HTTPSHandler(context=ssl_context)
)
self.logger.info('Calling ERP5 url %s' % url)
......@@ -213,7 +214,7 @@ class ERP5TestSuite(SlaprunnerTestSuite):
try:
if "erp5" == self._getCreatedERP5SiteId():
break
except:
except Exception:
self.logger.info("Fail to connect to erp5.... wait a bit longer")
pass
......
......@@ -167,7 +167,7 @@ class GitlabTestSuite(SlaprunnerTestSuite):
while loop < 3:
try:
self._connectToGitlab(url=self.backend_url)
except Exception, e:
except Exception as e:
if loop == 2:
raise
self.logger.warning(str(e))
......
......@@ -32,7 +32,7 @@ import logging
import random
import string
import time
import urllib
from six.moves.urllib.request import urlopen
logger = logging.getLogger('KVMResiliencyTest')
......@@ -45,7 +45,7 @@ def fetchKey(ip):
new_key = None
for i in range(0, 10):
try:
new_key = urllib.urlopen('http://%s:10080/get' % ip).read().strip()
new_key = urlopen('http://%s:10080/get' % ip).read().strip()
break
except IOError:
logger.error('Server in new KVM does not answer.')
......@@ -148,7 +148,7 @@ class KVMTestSuite(ResiliencyTestSuite):
for i in range(0, 60):
failure = False
try:
connection = urllib.urlopen('http://%s:10080/set?key=%s' % (self.ip, self.key))
connection = urlopen('http://%s:10080/set?key=%s' % (self.ip, self.key))
if connection.getcode() is 200:
break
else:
......
......@@ -34,7 +34,7 @@ import os
import subprocess
import sys
import time
import urllib2
from six.moves.urllib.request import urlopen
UNIT_TEST_ERP5TESTNODE = 'UnitTest'
......@@ -85,13 +85,13 @@ class ResiliencyTestSuite(object):
takeover_url = root_partition_parameter_dict['takeover-%s-%s-url' % (namebase, target_clone)]
takeover_password = root_partition_parameter_dict['takeover-%s-%s-password' % (namebase, target_clone)]
# Connect to takeover web interface
takeover_page_content = urllib2.urlopen(takeover_url).read()
takeover_page_content = urlopen(takeover_url).read()
# Wait for importer script to be not running
while 'Importer script(s) of backup in progress: True' in takeover_page_content:
time.sleep(10)
takeover_page_content = urllib2.urlopen(takeover_url).read()
takeover_page_content = urlopen(takeover_url).read()
# Do takeover
takeover_result = urllib2.urlopen('%s?password=%s' % (takeover_url, takeover_password)).read()
takeover_result = urlopen('%s?password=%s' % (takeover_url, takeover_password)).read()
if 'Error' in takeover_result:
raise Exception('Error while doing takeover: %s' % takeover_result)
......@@ -214,7 +214,8 @@ class ResiliencyTestSuite(object):
if 'monitor' in promise:
continue
try:
process = subprocess.check_output(os.path.join(promise_directory, promise))
subprocess.check_output(os.path.join(promise_directory, promise),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.logger.error('ERROR : promise "%s" failed with output :\n%s', promise, e.output)
return False
......
......@@ -29,15 +29,16 @@
from .resiliencytestsuite import ResiliencyTestSuite
import base64
import cookielib
from six.moves import http_cookiejar as cookielib
import json
from lxml import etree
import random
import ssl
import string
import time
import urllib2
import urllib
from six.moves.urllib.request import HTTPCookieProcessor, HTTPSHandler, \
build_opener
from six.moves.urllib.error import HTTPError
class NotHttpOkException(Exception):
pass
......@@ -52,9 +53,9 @@ class SlaprunnerTestSuite(ResiliencyTestSuite):
cookie_jar = cookielib.CookieJar()
ssl_context = ssl._create_unverified_context()
self._opener_director = urllib2.build_opener(
urllib2.HTTPCookieProcessor(cookie_jar),
urllib2.HTTPSHandler(context=ssl_context)
self._opener_director = build_opener(
HTTPCookieProcessor(cookie_jar),