Commit 72ddfb33 authored by Jérome Perrin

Fix most ResourceWarning in the test suite

As documented in https://docs.python.org/dev/whatsnew/3.2.html, python3 warns about unclosed files and sockets.

Use context managers with files to close them.
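
For example, a read-then-rewrite sequence becomes the following (an illustrative sketch of the pattern applied throughout the diff below; the helper name append_tail and its arguments are made up for this example):

  def append_tail(config_location, tail):
    # reading through a context manager closes the descriptor as soon as the
    # block exits, instead of whenever the file object happens to be collected
    with open(config_location) as f:
      buildout_text = f.read()
    # same pattern for writing the result back
    with open(config_location, 'w') as f:
      f.write(buildout_text + '\n\n' + tail)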

Use a context manager to close the socket to supervisor on python 3. On python 2 these sockets remain unclosed, as before.
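
The pattern is roughly the following generator-based context manager (a minimal sketch; the name closing_supervisor_proxy and the plain http URI are illustrative only; the actual implementation is the getSupervisorRPC() helper further down, which talks to supervisord over its unix socket transport):

  import contextlib
  import sys

  from six.moves import xmlrpc_client as xmlrpclib

  @contextlib.contextmanager
  def closing_supervisor_proxy(uri):
    proxy = xmlrpclib.ServerProxy(uri)
    if sys.version_info.major == 2:
      # python2's ServerProxy cannot be used as a context manager: entering it
      # would turn __enter__/__exit__ into XML-RPC calls, so the socket is
      # simply left open, as before.
      yield proxy
    else:
      # python3's ServerProxy is a closing context manager, so entering it
      # guarantees the underlying socket is closed on exit.
      with proxy as p:
        yield p

  # usage:
  #   with closing_supervisor_proxy('http://127.0.0.1:9001/RPC2') as proxy:
  #     proxy.supervisor.getState()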

There are a few warnings left, but the output is becoming readable again.

/reviewed-on nexedi/slapos.core!78
parents 52a59437 35de3ee1
...@@ -568,7 +568,8 @@ class Partition(object): ...@@ -568,7 +568,8 @@ class Partition(object):
shutil.copy(template_location, config_location) shutil.copy(template_location, config_location)
# fill generated buildout with additional information # fill generated buildout with additional information
buildout_text = open(config_location).read() with open(config_location) as f:
buildout_text = f.read()
buildout_text += '\n\n' + bytes2str(pkg_resources.resource_string(__name__, buildout_text += '\n\n' + bytes2str(pkg_resources.resource_string(__name__,
'templates/buildout-tail.cfg.in')) % { 'templates/buildout-tail.cfg.in')) % {
'computer_id': self.computer_id, 'computer_id': self.computer_id,
...@@ -580,7 +581,8 @@ class Partition(object): ...@@ -580,7 +581,8 @@ class Partition(object):
'storage_home': self.instance_storage_home, 'storage_home': self.instance_storage_home,
'global_ipv4_network_prefix': self.ipv4_global_network, 'global_ipv4_network_prefix': self.ipv4_global_network,
} }
open(config_location, 'w').write(buildout_text) with open(config_location, 'w') as f:
f.write(buildout_text)
os.chmod(config_location, 0o640) os.chmod(config_location, 0o640)
# Try to find the best possible buildout: # Try to find the best possible buildout:
# *) if software_root/bin/bootstrap exists use this one to bootstrap # *) if software_root/bin/bootstrap exists use this one to bootstrap
...@@ -706,14 +708,16 @@ class Partition(object): ...@@ -706,14 +708,16 @@ class Partition(object):
"""Asks supervisord to start the instance. If this instance is not """Asks supervisord to start the instance. If this instance is not
installed, we install it. installed, we install it.
""" """
supervisor = self.getSupervisorRPC()
partition_id = self.computer_partition.getId() partition_id = self.computer_partition.getId()
try: try:
supervisor.startProcessGroup(partition_id, False) with self.getSupervisorRPC() as supervisor:
supervisor.startProcessGroup(partition_id, False)
except xmlrpclib.Fault as exc: except xmlrpclib.Fault as exc:
if exc.faultString.startswith('BAD_NAME:'): if exc.faultString.startswith('BAD_NAME:'):
self.logger.info("Nothing to start on %s..." % self.logger.info("Nothing to start on %s..." %
self.computer_partition.getId()) self.computer_partition.getId())
else:
raise
else: else:
self.logger.info("Requested start of %s..." % self.computer_partition.getId()) self.logger.info("Requested start of %s..." % self.computer_partition.getId())
...@@ -721,11 +725,13 @@ class Partition(object): ...@@ -721,11 +725,13 @@ class Partition(object):
"""Asks supervisord to stop the instance.""" """Asks supervisord to stop the instance."""
partition_id = self.computer_partition.getId() partition_id = self.computer_partition.getId()
try: try:
supervisor = self.getSupervisorRPC() with self.getSupervisorRPC() as supervisor:
supervisor.stopProcessGroup(partition_id, False) supervisor.stopProcessGroup(partition_id, False)
except xmlrpclib.Fault as exc: except xmlrpclib.Fault as exc:
if exc.faultString.startswith('BAD_NAME:'): if exc.faultString.startswith('BAD_NAME:'):
self.logger.info('Partition %s not known in supervisord, ignoring' % partition_id) self.logger.info('Partition %s not known in supervisord, ignoring' % partition_id)
else:
raise
else: else:
self.logger.info("Requested stop of %s..." % self.computer_partition.getId()) self.logger.info("Requested stop of %s..." % self.computer_partition.getId())
...@@ -796,15 +802,18 @@ class Partition(object): ...@@ -796,15 +802,18 @@ class Partition(object):
def checkProcessesFromStateList(self, process_list, state_list): def checkProcessesFromStateList(self, process_list, state_list):
"""Asks supervisord to check if one of the processes are in the state_list.""" """Asks supervisord to check if one of the processes are in the state_list."""
supervisor = self.getSupervisorRPC()
for process in process_list: for process in process_list:
try: try:
info = supervisor.getProcessInfo(process) with self.getSupervisorRPC() as supervisor:
info = supervisor.getProcessInfo(process)
if info['statename'] in state_list: if info['statename'] in state_list:
return True return True
except xmlrpclib.Fault as exc: except xmlrpclib.Fault as exc:
self.logger.debug("BAD process name: %r" % process) if exc.faultString.startswith('BAD_NAME:'):
continue self.logger.debug("BAD process name: %r" % process)
continue
else:
raise
return False return False
def cleanupFolder(self, folder_path): def cleanupFolder(self, folder_path):
...@@ -830,43 +839,43 @@ class Partition(object): ...@@ -830,43 +839,43 @@ class Partition(object):
# In future it will not be needed, as update command # In future it will not be needed, as update command
# is going to be implemented on server side. # is going to be implemented on server side.
self.logger.debug('Updating supervisord') self.logger.debug('Updating supervisord')
supervisor = self.getSupervisorRPC() with self.getSupervisorRPC() as supervisor:
# took from supervisord.supervisorctl.do_update # took from supervisord.supervisorctl.do_update
result = supervisor.reloadConfig() result = supervisor.reloadConfig()
added, changed, removed = result[0] added, changed, removed = result[0]
for gname in removed: for gname in removed:
results = supervisor.stopProcessGroup(gname) results = supervisor.stopProcessGroup(gname)
fails = [res for res in results fails = [res for res in results
if res['status'] == xmlrpc.Faults.FAILED] if res['status'] == xmlrpc.Faults.FAILED]
if fails: if fails:
self.logger.warning('Problem while stopping process %r, will try later' % gname) self.logger.warning('Problem while stopping process %r, will try later' % gname)
else: else:
self.logger.info('Stopped %r' % gname)
for i in range(0, 10):
# Some process may be still running, be nice and wait for them to be stopped.
try:
supervisor.removeProcessGroup(gname)
break
except:
if i == 9:
raise
time.sleep(1)
self.logger.info('Removed %r' % gname)
for gname in changed:
results = supervisor.stopProcessGroup(gname)
self.logger.info('Stopped %r' % gname) self.logger.info('Stopped %r' % gname)
for i in range(0, 10):
# Some process may be still running, be nice and wait for them to be stopped. supervisor.removeProcessGroup(gname)
try: supervisor.addProcessGroup(gname)
supervisor.removeProcessGroup(gname) self.logger.info('Updated %r' % gname)
break
except: for gname in added:
if i == 9: supervisor.addProcessGroup(gname)
raise self.logger.info('Updated %r' % gname)
time.sleep(1) self.logger.debug('Supervisord updated')
self.logger.info('Removed %r' % gname)
for gname in changed:
results = supervisor.stopProcessGroup(gname)
self.logger.info('Stopped %r' % gname)
supervisor.removeProcessGroup(gname)
supervisor.addProcessGroup(gname)
self.logger.info('Updated %r' % gname)
for gname in added:
supervisor.addProcessGroup(gname)
self.logger.info('Updated %r' % gname)
self.logger.debug('Supervisord updated')
def _set_ownership(self, path): def _set_ownership(self, path):
""" """
......
...@@ -995,7 +995,8 @@ stderr_logfile_backups=1 ...@@ -995,7 +995,8 @@ stderr_logfile_backups=1
periodicity_path = os.path.join(software_path, 'periodicity') periodicity_path = os.path.join(software_path, 'periodicity')
if os.path.exists(periodicity_path): if os.path.exists(periodicity_path):
try: try:
periodicity = int(open(periodicity_path).read()) with open(periodicity_path) as f:
periodicity = int(f.read())
except ValueError: except ValueError:
os.remove(periodicity_path) os.remove(periodicity_path)
self.logger.exception('') self.logger.exception('')
...@@ -1031,7 +1032,8 @@ stderr_logfile_backups=1 ...@@ -1031,7 +1032,8 @@ stderr_logfile_backups=1
# changed). # changed).
if (computer_partition_id not in self.computer_partition_filter_list and if (computer_partition_id not in self.computer_partition_filter_list and
not self.develop and os.path.exists(timestamp_path)): not self.develop and os.path.exists(timestamp_path)):
old_timestamp = open(timestamp_path).read() with open(timestamp_path) as f:
old_timestamp = f.read()
last_runtime = int(os.path.getmtime(timestamp_path)) last_runtime = int(os.path.getmtime(timestamp_path))
if timestamp: if timestamp:
try: try:
...@@ -1064,9 +1066,7 @@ stderr_logfile_backups=1 ...@@ -1064,9 +1066,7 @@ stderr_logfile_backups=1
# Include Partition Logging # Include Partition Logging
log_folder_path = "%s/.slapgrid/log" % instance_path log_folder_path = "%s/.slapgrid/log" % instance_path
mkdir_p(log_folder_path) mkdir_p(log_folder_path)
partition_file_handler = logging.FileHandler(
filename="%s/instance.log" % (log_folder_path)
)
stat_info = os.stat(instance_path) stat_info = os.stat(instance_path)
chownDirectory("%s/.slapgrid" % instance_path, chownDirectory("%s/.slapgrid" % instance_path,
uid=stat_info.st_uid, uid=stat_info.st_uid,
...@@ -1074,9 +1074,13 @@ stderr_logfile_backups=1 ...@@ -1074,9 +1074,13 @@ stderr_logfile_backups=1
formatter = logging.Formatter( formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(name)s %(message)s') '[%(asctime)s] %(levelname)-8s %(name)s %(message)s')
# this partition_file_handler will be cleaned up after this try: block
partition_file_handler = logging.FileHandler(
filename="%s/instance.log" % (log_folder_path)
)
partition_file_handler.setFormatter(formatter) partition_file_handler.setFormatter(formatter)
self.logger.addHandler(partition_file_handler) self.logger.addHandler(partition_file_handler)
try: try:
self.logger.info('Processing Computer Partition %s.' % computer_partition_id) self.logger.info('Processing Computer Partition %s.' % computer_partition_id)
self.logger.info(' Software URL: %s' % software_url) self.logger.info(' Software URL: %s' % software_url)
...@@ -1153,8 +1157,9 @@ stderr_logfile_backups=1 ...@@ -1153,8 +1157,9 @@ stderr_logfile_backups=1
# updating promises state, no need to raise here # updating promises state, no need to raise here
pass pass
raise e raise e
else: finally:
self.logger.removeHandler(partition_file_handler) self.logger.removeHandler(partition_file_handler)
partition_file_handler.close()
# Run manager tear down # Run manager tear down
for manager in self._manager_list: for manager in self._manager_list:
...@@ -1162,7 +1167,8 @@ stderr_logfile_backups=1 ...@@ -1162,7 +1167,8 @@ stderr_logfile_backups=1
# If partition has been successfully processed, write timestamp # If partition has been successfully processed, write timestamp
if timestamp: if timestamp:
open(timestamp_path, 'w').write(timestamp) with open(timestamp_path, 'w') as f:
f.write(timestamp)
def FilterComputerPartitionList(self, computer_partition_list): def FilterComputerPartitionList(self, computer_partition_list):
""" """
...@@ -1492,7 +1498,8 @@ stderr_logfile_backups=1 ...@@ -1492,7 +1498,8 @@ stderr_logfile_backups=1
file_path = os.path.join(dir_reports, filename) file_path = os.path.join(dir_reports, filename)
if os.path.exists(file_path): if os.path.exists(file_path):
usage = open(file_path, 'r').read() with open(file_path, 'r') as f:
usage = f.read()
# We check the validity of xml content of each reports # We check the validity of xml content of each reports
if not self.validateXML(usage, partition_consumption_model): if not self.validateXML(usage, partition_consumption_model):
...@@ -1538,7 +1545,8 @@ stderr_logfile_backups=1 ...@@ -1538,7 +1545,8 @@ stderr_logfile_backups=1
file_path = os.path.join(computer_report_dir, filename) file_path = os.path.join(computer_report_dir, filename)
if os.path.exists(file_path): if os.path.exists(file_path):
usage = open(file_path, 'r').read() with open(file_path, 'r') as f:
usage = f.read()
if self.validateXML(usage, computer_consumption_model): if self.validateXML(usage, computer_consumption_model):
self.logger.info('XML file generated by asXML is valid') self.logger.info('XML file generated by asXML is valid')
......
...@@ -36,6 +36,7 @@ import stat ...@@ -36,6 +36,7 @@ import stat
import sys import sys
import time import time
from six.moves import xmlrpc_client as xmlrpclib from six.moves import xmlrpc_client as xmlrpclib
import contextlib
from slapos.grid.utils import (createPrivateDirectory, SlapPopen, updateFile) from slapos.grid.utils import (createPrivateDirectory, SlapPopen, updateFile)
from slapos.util import bytes2str from slapos.util import bytes2str
...@@ -43,13 +44,25 @@ from slapos.util import bytes2str ...@@ -43,13 +44,25 @@ from slapos.util import bytes2str
from supervisor import xmlrpc, states from supervisor import xmlrpc, states
@contextlib.contextmanager
def getSupervisorRPC(socket): def getSupervisorRPC(socket):
"""Get a supervisor XML-RPC connection.
Use in a context manager for proper closing of sockets.
"""
supervisor_transport = xmlrpc.SupervisorTransport('', '', supervisor_transport = xmlrpc.SupervisorTransport('', '',
'unix://' + socket) 'unix://' + socket)
server_proxy = xmlrpclib.ServerProxy('http://127.0.0.1', server_proxy = xmlrpclib.ServerProxy('http://127.0.0.1',
supervisor_transport) supervisor_transport)
return getattr(server_proxy, 'supervisor')
# python3's xmlrpc is a closing context manager, python2 is not and cannot be
# just used as a context manager as it would call __enter__ and __exit__ on
# XML-RPC.
if sys.version_info.major == 2:
yield server_proxy.supervisor
else:
with server_proxy as s:
yield s.supervisor
def _getSupervisordSocketPath(instance_root): def _getSupervisordSocketPath(instance_root):
return os.path.join(instance_root, 'supervisord.socket') return os.path.join(instance_root, 'supervisord.socket')
...@@ -116,13 +129,13 @@ def _updateWatchdog(socket): ...@@ -116,13 +129,13 @@ def _updateWatchdog(socket):
Then, when running slapgrid, the real watchdog configuration is generated. Then, when running slapgrid, the real watchdog configuration is generated.
We thus need to reload watchdog configuration if needed and start it. We thus need to reload watchdog configuration if needed and start it.
""" """
supervisor = getSupervisorRPC(socket) with getSupervisorRPC(socket) as supervisor:
if supervisor.getProcessInfo('watchdog')['state'] not in states.RUNNING_STATES: if supervisor.getProcessInfo('watchdog')['state'] not in states.RUNNING_STATES:
# XXX workaround for https://github.com/Supervisor/supervisor/issues/339 # XXX workaround for https://github.com/Supervisor/supervisor/issues/339
# In theory, only reloadConfig is needed. # In theory, only reloadConfig is needed.
supervisor.removeProcessGroup('watchdog') supervisor.removeProcessGroup('watchdog')
supervisor.reloadConfig() supervisor.reloadConfig()
supervisor.addProcessGroup('watchdog') supervisor.addProcessGroup('watchdog')
def launchSupervisord(instance_root, logger, def launchSupervisord(instance_root, logger,
supervisord_additional_argument_list=None): supervisord_additional_argument_list=None):
...@@ -132,13 +145,15 @@ def launchSupervisord(instance_root, logger, ...@@ -132,13 +145,15 @@ def launchSupervisord(instance_root, logger,
trynum = 1 trynum = 1
while trynum < 6: while trynum < 6:
try: try:
supervisor = getSupervisorRPC(socket) with getSupervisorRPC(socket) as supervisor:
status = supervisor.getState() status = supervisor.getState()
except xmlrpclib.Fault as e: except xmlrpclib.Fault as e:
if e.faultCode == 6 and e.faultString == 'SHUTDOWN_STATE': if e.faultCode == 6 and e.faultString == 'SHUTDOWN_STATE':
logger.info('Supervisor in shutdown procedure, will check again later.') logger.info('Supervisor in shutdown procedure, will check again later.')
trynum += 1 trynum += 1
time.sleep(2 * trynum) time.sleep(2 * trynum)
else:
raise
except Exception: except Exception:
# In case if there is problem with connection, assume that supervisord # In case if there is problem with connection, assume that supervisord
# is not running and try to run it # is not running and try to run it
...@@ -187,8 +202,8 @@ def launchSupervisord(instance_root, logger, ...@@ -187,8 +202,8 @@ def launchSupervisord(instance_root, logger,
while trynum < 6: while trynum < 6:
try: try:
socketlib.setdefaulttimeout(current_timeout) socketlib.setdefaulttimeout(current_timeout)
supervisor = getSupervisorRPC(socket) with getSupervisorRPC(socket) as supervisor:
status = supervisor.getState() status = supervisor.getState()
if status['statename'] == 'RUNNING' and status['statecode'] == 1: if status['statename'] == 'RUNNING' and status['statecode'] == 1:
return return
logger.warning('Wrong status name %(statename)r and code ' logger.warning('Wrong status name %(statename)r and code '
......
...@@ -312,7 +312,9 @@ def launchBuildout(path, buildout_binary, logger, ...@@ -312,7 +312,9 @@ def launchBuildout(path, buildout_binary, logger,
uid = stat_info.st_uid uid = stat_info.st_uid
gid = stat_info.st_gid gid = stat_info.st_gid
# Extract python binary to prevent shebang size limit # Extract python binary to prevent shebang size limit
line = open(buildout_binary, 'r').readline() with open(buildout_binary, 'r') as f:
line = f.readline()
invocation_list = [] invocation_list = []
if line.startswith('#!'): if line.startswith('#!'):
line = line[2:] line = line[2:]
......
...@@ -143,8 +143,10 @@ class Watchdog(object): ...@@ -143,8 +143,10 @@ class Watchdog(object):
# Partition never managed to deploy successfully, ignore bang # Partition never managed to deploy successfully, ignore bang
return True return True
last_bang_timestamp = int(open(slapos_last_bang_timestamp_file_path, 'r').read()) with open(slapos_last_bang_timestamp_file_path, 'r') as f:
deployment_timestamp = int(open(partition_timestamp_file_path, 'r').read()) last_bang_timestamp = int(f.read())
with open(partition_timestamp_file_path, 'r') as f:
deployment_timestamp = int(f.read())
if deployment_timestamp > last_bang_timestamp: if deployment_timestamp > last_bang_timestamp:
# It previously banged BEFORE latest successful deployment # It previously banged BEFORE latest successful deployment
# i.e it haven't banged since last successful deployment # i.e it haven't banged since last successful deployment
...@@ -175,10 +177,12 @@ class Watchdog(object): ...@@ -175,10 +177,12 @@ class Watchdog(object):
COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME
) )
if os.path.exists(partition_timestamp_file_path): if os.path.exists(partition_timestamp_file_path):
timestamp = open(partition_timestamp_file_path, 'r').read() with open(partition_timestamp_file_path, 'r') as f:
timestamp = f.read()
else: else:
timestamp = '0' timestamp = '0'
open(slapos_last_bang_timestamp_file_path, 'w').write(timestamp) with open(slapos_last_bang_timestamp_file_path, 'w') as f:
f.write(timestamp)
def handle_process_state_change_event(self, headers, payload_dict): def handle_process_state_change_event(self, headers, payload_dict):
partition_id = payload_dict['groupname'] partition_id = payload_dict['groupname']
......
...@@ -167,13 +167,13 @@ class Manager(object): ...@@ -167,13 +167,13 @@ class Manager(object):
partition.writeSupervisorConfigurationFile() partition.writeSupervisorConfigurationFile()
# Start processes # Start processes
supervisord = partition.getSupervisorRPC() with partition.getSupervisorRPC() as supervisor:
for program in socat_programs: for program in socat_programs:
process_name = '{}:{}'.format(group_id, program['name']) process_name = '{}:{}'.format(group_id, program['name'])
status = supervisord.getProcessInfo(process_name) status = supervisor.getProcessInfo(process_name)
if status['start'] == 0: if status['start'] == 0:
supervisord.startProcess(process_name, False) supervisor.startProcess(process_name, False)
def report(self, partition): def report(self, partition):
"""Method called at `slapos node report` phase. """Method called at `slapos node report` phase.
......
...@@ -80,16 +80,16 @@ class Manager(object): ...@@ -80,16 +80,16 @@ class Manager(object):
partition.writeSupervisorConfigurationFile() partition.writeSupervisorConfigurationFile()
# check the state of all process, if the process is not started yes, start it # check the state of all process, if the process is not started yes, start it
supervisord = partition.getSupervisorRPC() with partition.getSupervisorRPC() as supervisor:
process_list_string = "" process_list_string = ""
for name in wrapper_list: for name in wrapper_list:
process_name = '-'.join([partition_id, group_suffix]) + ':' + name process_name = '-'.join([partition_id, group_suffix]) + ':' + name
process_list_string += '%s\n' % process_name process_list_string += '%s\n' % process_name
status = supervisord.getProcessInfo(process_name) status = supervisor.getProcessInfo(process_name)
if status['start'] == 0: if status['start'] == 0:
# process is not started yet # process is not started yet
logger.info("Starting pre-delete process %r..." % name) logger.info("Starting pre-delete process %r..." % name)
supervisord.startProcess(process_name, False) supervisor.startProcess(process_name, False)
# ask to slapgrid to check theses scripts before destroy partition # ask to slapgrid to check theses scripts before destroy partition
with open(wait_filepath, 'w') as f: with open(wait_filepath, 'w') as f:
......
...@@ -185,7 +185,8 @@ class TestCliProxyShow(CliMixin): ...@@ -185,7 +185,8 @@ class TestCliProxyShow(CliMixin):
self.assertEqual('', stdout.getvalue()) self.assertEqual('', stdout.getvalue())
self.assertEqual('', stderr.getvalue()) self.assertEqual('', stderr.getvalue())
# our pager was set to output to this temporary file # our pager was set to output to this temporary file
self.assertIn('287375f0cba269902ba1bc50242839d7', open(tmp.name, 'r').read()) with open(tmp.name, 'r') as f:
self.assertIn('287375f0cba269902ba1bc50242839d7', f.read())
class TestCliNode(CliMixin): class TestCliNode(CliMixin):
......
...@@ -646,8 +646,10 @@ class RunPromise(GenericPromise): ...@@ -646,8 +646,10 @@ class RunPromise(GenericPromise):
self.assertTrue(os.path.exists(first_state_file)) self.assertTrue(os.path.exists(first_state_file))
self.assertTrue(os.path.exists(second_state_file)) self.assertTrue(os.path.exists(second_state_file))
first_result = json.load(open(first_state_file)) with open(first_state_file) as f:
second_result = json.load(open(second_state_file)) first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertEqual(first_result['name'], first_promise) self.assertEqual(first_result['name'], first_promise)
self.assertEqual(second_result['name'], second_promise) self.assertEqual(second_result['name'], second_promise)
first_date = first_result['result']['date'] first_date = first_result['result']['date']
...@@ -659,8 +661,10 @@ class RunPromise(GenericPromise): ...@@ -659,8 +661,10 @@ class RunPromise(GenericPromise):
self.launcher.run() # only my_first_promise will run but second_promise still failing self.launcher.run() # only my_first_promise will run but second_promise still failing
self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise) self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
first_result = json.load(open(first_state_file)) with open(first_state_file) as f:
second_result = json.load(open(second_state_file)) first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date) self.assertNotEqual(first_result['result']['date'], first_date)
self.assertEqual(second_result['result']['date'], second_date) self.assertEqual(second_result['result']['date'], second_date)
first_date = first_result['result']['date'] first_date = first_result['result']['date']
...@@ -671,8 +675,10 @@ class RunPromise(GenericPromise): ...@@ -671,8 +675,10 @@ class RunPromise(GenericPromise):
self.launcher.run() self.launcher.run()
self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise) self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
first_result = json.load(open(first_state_file)) with open(first_state_file) as f:
second_result = json.load(open(second_state_file)) first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date) self.assertNotEqual(first_result['result']['date'], first_date)
self.assertNotEqual(second_result['result']['date'], second_date) self.assertNotEqual(second_result['result']['date'], second_date)
...@@ -696,8 +702,10 @@ class RunPromise(GenericPromise): ...@@ -696,8 +702,10 @@ class RunPromise(GenericPromise):
self.assertTrue(os.path.exists(first_state_file)) self.assertTrue(os.path.exists(first_state_file))
self.assertTrue(os.path.exists(second_state_file)) self.assertTrue(os.path.exists(second_state_file))
first_result = json.load(open(first_state_file)) with open(first_state_file) as f:
second_result = json.load(open(second_state_file)) first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertEqual(first_result['name'], first_promise) self.assertEqual(first_result['name'], first_promise)
self.assertEqual(second_result['name'], second_promise) self.assertEqual(second_result['name'], second_promise)
first_date = first_result['result']['date'] first_date = first_result['result']['date']
...@@ -709,8 +717,10 @@ class RunPromise(GenericPromise): ...@@ -709,8 +717,10 @@ class RunPromise(GenericPromise):
self.launcher.run() # only my_first_promise will run but second_promise still failing self.launcher.run() # only my_first_promise will run but second_promise still failing
self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise) self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
first_result = json.load(open(first_state_file)) with open(first_state_file) as f:
second_result = json.load(open(second_state_file)) first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date) self.assertNotEqual(first_result['result']['date'], first_date)
self.assertEqual(second_result['result']['date'], second_date) self.assertEqual(second_result['result']['date'], second_date)
first_date = first_result['result']['date'] first_date = first_result['result']['date']
...@@ -725,8 +735,10 @@ class RunPromise(GenericPromise): ...@@ -725,8 +735,10 @@ class RunPromise(GenericPromise):
self.configureLauncher() self.configureLauncher()
self.launcher.run() # now all succeed self.launcher.run() # now all succeed
first_result = json.load(open(first_state_file)) with open(first_state_file) as f:
second_result = json.load(open(second_state_file)) first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date) self.assertNotEqual(first_result['result']['date'], first_date)
self.assertNotEqual(second_result['result']['date'], second_date) self.assertNotEqual(second_result['result']['date'], second_date)
......
...@@ -176,8 +176,9 @@ class BasicMixin(object): ...@@ -176,8 +176,9 @@ class BasicMixin(object):
def assertLogContent(self, log_path, expected, tries=600): def assertLogContent(self, log_path, expected, tries=600):
for i in range(tries): for i in range(tries):
if expected in open(log_path).read(): with open(log_path) as f:
return if expected in f.read():
return
time.sleep(0.1) time.sleep(0.1)
self.fail('%r not found in %s' % (expected, log_path)) self.fail('%r not found in %s' % (expected, log_path))
...@@ -205,7 +206,8 @@ class BasicMixin(object): ...@@ -205,7 +206,8 @@ class BasicMixin(object):
svc = os.path.join(self.instance_root, 'var', 'run', 'supervisord.pid') svc = os.path.join(self.instance_root, 'var', 'run', 'supervisord.pid')
if os.path.exists(svc): if os.path.exists(svc):
try: try:
pid = int(open(svc).read().strip()) with open(svc) as f:
pid = int(f.read().strip())
except ValueError: except ValueError:
pass pass
else: else:
...@@ -483,7 +485,8 @@ class InstanceForTest(object): ...@@ -483,7 +485,8 @@ class InstanceForTest(object):
if not os.path.isdir(promise_path): if not os.path.isdir(promise_path):
os.makedirs(promise_path) os.makedirs(promise_path)
promise = os.path.join(promise_path, promise_name) promise = os.path.join(promise_path, promise_name)
open(promise, 'w').write(promise_content) with open(promise, 'w') as f:
f.write(promise_content)
os.chmod(promise, 0o777) os.chmod(promise, 0o777)
def setCertificate(self, certificate_repository_path): def setCertificate(self, certificate_repository_path):
...@@ -492,11 +495,13 @@ class InstanceForTest(object): ...@@ -492,11 +495,13 @@ class InstanceForTest(object):
self.cert_file = os.path.join(certificate_repository_path, self.cert_file = os.path.join(certificate_repository_path,
"%s.crt" % self.name) "%s.crt" % self.name)
self.certificate = str(random.random()) self.certificate = str(random.random())
open(self.cert_file, 'w').write(self.certificate) with open(self.cert_file, 'w') as f:
f.write(self.certificate)
self.key_file = os.path.join(certificate_repository_path, self.key_file = os.path.join(certificate_repository_path,
'%s.key' % self.name) '%s.key' % self.name)
self.key = str(random.random()) self.key = str(random.random())
open(self.key_file, 'w').write(self.key) with open(self.key_file, 'w') as f:
f.write(self.key)
class SoftwareForTest(object): class SoftwareForTest(object):
...@@ -532,22 +537,24 @@ class SoftwareForTest(object): ...@@ -532,22 +537,24 @@ class SoftwareForTest(object):
""" """
Set template.cfg Set template.cfg
""" """
open(os.path.join(self.srdir, 'template.cfg'), 'w').write(template) with open(os.path.join(self.srdir, 'template.cfg'), 'w') as f:
f.write(template)
def setBuildout(self, buildout="""#!/bin/sh def setBuildout(self, buildout="""#!/bin/sh
touch worked"""): touch worked"""):
""" """
Set a buildout exec in bin Set a buildout exec in bin
""" """
open(os.path.join(self.srbindir, 'buildout'), 'w').write(buildout) with open(os.path.join(self.srbindir, 'buildout'), 'w') as f:
f.write(buildout)
os.chmod(os.path.join(self.srbindir, 'buildout'), 0o755) os.chmod(os.path.join(self.srbindir, 'buildout'), 0o755)
def setPeriodicity(self, periodicity): def setPeriodicity(self, periodicity):
""" """
Set a periodicity file Set a periodicity file
""" """
with open(os.path.join(self.srdir, 'periodicity'), 'w') as fout: with open(os.path.join(self.srdir, 'periodicity'), 'w') as f:
fout.write(str(periodicity)) f.write(str(periodicity))
@implementer(IManager) @implementer(IManager)
...@@ -873,11 +880,12 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase): ...@@ -873,11 +880,12 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
# Prepare watchdog # Prepare watchdog
self.watchdog_banged = os.path.join(self._tempdir, 'watchdog_banged') self.watchdog_banged = os.path.join(self._tempdir, 'watchdog_banged')
watchdog_path = os.path.join(self._tempdir, 'watchdog') watchdog_path = os.path.join(self._tempdir, 'watchdog')
open(watchdog_path, 'w').write(WATCHDOG_TEMPLATE.format( with open(watchdog_path, 'w') as f:
python_path=sys.executable, f.write(WATCHDOG_TEMPLATE.format(
sys_path=sys.path, python_path=sys.executable,
watchdog_banged=self.watchdog_banged sys_path=sys.path,
)) watchdog_banged=self.watchdog_banged
))
os.chmod(watchdog_path, 0o755) os.chmod(watchdog_path, 0o755)
self.grid.watchdog_path = watchdog_path self.grid.watchdog_path = watchdog_path
slapos.grid.slapgrid.WATCHDOG_PATH = watchdog_path slapos.grid.slapgrid.WATCHDOG_PATH = watchdog_path
...@@ -908,7 +916,8 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase): ...@@ -908,7 +916,8 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
daemon_log = os.path.join(partition.partition_path, '.0_daemon.log') daemon_log = os.path.join(partition.partition_path, '.0_daemon.log')
self.assertLogContent(daemon_log, 'Failing') self.assertLogContent(daemon_log, 'Failing')
self.assertIsCreated(self.watchdog_banged) self.assertIsCreated(self.watchdog_banged)
self.assertIn('daemon', open(self.watchdog_banged).read()) with open(self.watchdog_banged) as f:
self.assertIn('daemon', f.read())
def test_one_failing_daemon_in_run_will_not_bang_with_watchdog(self): def test_one_failing_daemon_in_run_will_not_bang_with_watchdog(self):
""" """
...@@ -1059,8 +1068,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase): ...@@ -1059,8 +1068,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload) watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang']) self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content) with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertEqual(f.read(), timestamp_content)
def test_watchdog_ignore_bang_if_partition_not_deployed(self): def test_watchdog_ignore_bang_if_partition_not_deployed(self):
""" """
...@@ -1092,8 +1103,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase): ...@@ -1092,8 +1103,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload) watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang']) self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertNotEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content) with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertNotEqual(f.read(), timestamp_content)
def test_watchdog_bang_only_once_if_partition_never_deployed(self): def test_watchdog_bang_only_once_if_partition_never_deployed(self):
""" """
...@@ -1179,7 +1192,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase): ...@@ -1179,7 +1192,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload) watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang']) self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content) with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertEqual(f.read(), timestamp_content)
# Second bang # Second bang
event = watchdog.process_state_events[0] event = watchdog.process_state_events[0]
...@@ -1207,7 +1223,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase): ...@@ -1207,7 +1223,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload) watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang']) self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content) with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertEqual(f.read(), timestamp_content)
# Fourth bang # Fourth bang
event = watchdog.process_state_events[0] event = watchdog.process_state_events[0]
...@@ -1237,7 +1256,8 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase): ...@@ -1237,7 +1256,8 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
timestamp_path = os.path.join(instance.partition_path, '.timestamp') timestamp_path = os.path.join(instance.partition_path, '.timestamp')
self.setSlapgrid() self.setSlapgrid()
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS) self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertIn(timestamp, open(timestamp_path).read()) with open(timestamp_path) as f:
self.assertIn(timestamp, f.read())
self.assertEqual(instance.sequence, self.assertEqual(instance.sequence,
['/stoppedComputerPartition']) ['/stoppedComputerPartition'])
......
...@@ -459,14 +459,16 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase): ...@@ -459,14 +459,16 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
software = self.createSoftware() software = self.createSoftware()
partition = self.createPartition(software.url, retention_delay=delay) partition = self.createPartition(software.url, retention_delay=delay)
partition.install() partition.install()
deployed_delay = int(open(partition.retention_lock_delay_file_path).read()) with open(partition.retention_lock_delay_file_path) as f:
deployed_delay = int(f.read())
self.assertEqual(delay, deployed_delay) self.assertEqual(delay, deployed_delay)
def test_no_retention_lock_delay(self): def test_no_retention_lock_delay(self):
software = self.createSoftware() software = self.createSoftware()
partition = self.createPartition(software.url) partition = self.createPartition(software.url)
partition.install() partition.install()
delay = open(partition.retention_lock_delay_file_path).read() with open(partition.retention_lock_delay_file_path) as f:
delay = f.read()
self.assertTrue(delay, '0') self.assertTrue(delay, '0')
self.assertTrue(partition.destroy()) self.assertTrue(partition.destroy())
...@@ -485,7 +487,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase): ...@@ -485,7 +487,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition.install() partition.install()
partition.destroy() partition.destroy()
deployed_delay = int(open(partition.retention_lock_delay_file_path).read()) with open(partition.retention_lock_delay_file_path) as f:
deployed_delay = int(f.read())
self.assertEqual(delay, deployed_delay) self.assertEqual(delay, deployed_delay)
def test_retention_lock_delay_is_respected(self): def test_retention_lock_delay_is_respected(self):
...@@ -494,7 +497,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase): ...@@ -494,7 +497,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition = self.createPartition(software.url, retention_delay=delay) partition = self.createPartition(software.url, retention_delay=delay)
partition.install() partition.install()
deployed_delay = float(open(partition.retention_lock_delay_file_path).read()) with open(partition.retention_lock_delay_file_path) as f:
deployed_delay = float(f.read())
self.assertEqual(int(delay), int(deployed_delay)) self.assertEqual(int(delay), int(deployed_delay))
self.assertFalse(partition.destroy()) self.assertFalse(partition.destroy())
...@@ -510,7 +514,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase): ...@@ -510,7 +514,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition.install() partition.install()
self.assertFalse(os.path.exists(partition.retention_lock_date_file_path)) self.assertFalse(os.path.exists(partition.retention_lock_date_file_path))
partition.destroy() partition.destroy()
deployed_date = float(open(partition.retention_lock_date_file_path).read()) with open(partition.retention_lock_date_file_path) as f:
deployed_date = float(f.read())
self.assertEqual(delay * 3600 * 24 + int(time.time()), int(deployed_date)) self.assertEqual(delay * 3600 * 24 + int(time.time()), int(deployed_date))
def test_retention_lock_date_does_not_change(self): def test_retention_lock_date_does_not_change(self):
...@@ -529,5 +534,6 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase): ...@@ -529,5 +534,6 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition.install() partition.install()
partition.destroy() partition.destroy()
deployed_date = float(open(partition.retention_lock_date_file_path).read()) with open(partition.retention_lock_date_file_path) as f:
deployed_date = float(f.read())
self.assertEqual(delay * 3600 * 24 + int(now), int(deployed_date)) self.assertEqual(delay * 3600 * 24 + int(now), int(deployed_date))
...@@ -1142,7 +1142,8 @@ database_uri = %(tempdir)s/lib/external_proxy.db ...@@ -1142,7 +1142,8 @@ database_uri = %(tempdir)s/lib/external_proxy.db
'external_proxy_host': self.external_proxy_host, 'external_proxy_host': self.external_proxy_host,
'external_proxy_port': self.external_proxy_port 'external_proxy_port': self.external_proxy_port
} }
open(self.slapos_cfg, 'w').write(configuration) with open(self.slapos_cfg, 'w') as f:
f.write(configuration)
def external_proxy_add_free_partition(self, partition_amount, computer_id=None): def external_proxy_add_free_partition(self, partition_amount, computer_id=None):
""" """
......