Commit 72ddfb33 authored by Jérome Perrin

Fix most ResourceWarning in the test suite

As described in https://docs.python.org/dev/whatsnew/3.2.html, Python 3 warns about unclosed files and sockets.

Use context managers with files to close them.
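For example, a minimal sketch of the pattern (read_buildout_config is a hypothetical helper name; the read of config_location mirrors the diff below):

    def read_buildout_config(config_location):
        # Before: open(config_location).read() left the file object to the
        # garbage collector, which python3 reports as a ResourceWarning.
        # After: the context manager closes the file as soon as the block exits.
        with open(config_location) as f:
            return f.read()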

Use a context manager to close the socket to supervisor on Python 3. On Python 2 these sockets remain unclosed, as before.
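Callers now wrap every supervisor RPC call in a with block. A usage sketch (the import path and the socket location are assumptions, and a running supervisord is needed for the call to succeed):

    from slapos.grid.svcbackend import getSupervisorRPC  # assumed module path

    # hypothetical instance root; the socket name follows _getSupervisordSocketPath()
    socket_path = '/srv/slapgrid/supervisord.socket'

    with getSupervisorRPC(socket_path) as supervisor:
        # On python3 the underlying unix socket is closed when the block exits;
        # on python2 the ServerProxy is yielded as-is and stays open, as before.
        print(supervisor.getState())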

A few warnings remain, but the output starts to be readable again.

/reviewed-on nexedi/slapos.core!78
parents 52a59437 35de3ee1
......@@ -568,7 +568,8 @@ class Partition(object):
shutil.copy(template_location, config_location)
# fill generated buildout with additional information
buildout_text = open(config_location).read()
with open(config_location) as f:
buildout_text = f.read()
buildout_text += '\n\n' + bytes2str(pkg_resources.resource_string(__name__,
'templates/buildout-tail.cfg.in')) % {
'computer_id': self.computer_id,
......@@ -580,7 +581,8 @@ class Partition(object):
'storage_home': self.instance_storage_home,
'global_ipv4_network_prefix': self.ipv4_global_network,
}
open(config_location, 'w').write(buildout_text)
with open(config_location, 'w') as f:
f.write(buildout_text)
os.chmod(config_location, 0o640)
# Try to find the best possible buildout:
# *) if software_root/bin/bootstrap exists use this one to bootstrap
......@@ -706,14 +708,16 @@ class Partition(object):
"""Asks supervisord to start the instance. If this instance is not
installed, we install it.
"""
supervisor = self.getSupervisorRPC()
partition_id = self.computer_partition.getId()
try:
supervisor.startProcessGroup(partition_id, False)
with self.getSupervisorRPC() as supervisor:
supervisor.startProcessGroup(partition_id, False)
except xmlrpclib.Fault as exc:
if exc.faultString.startswith('BAD_NAME:'):
self.logger.info("Nothing to start on %s..." %
self.computer_partition.getId())
else:
raise
else:
self.logger.info("Requested start of %s..." % self.computer_partition.getId())
......@@ -721,11 +725,13 @@ class Partition(object):
"""Asks supervisord to stop the instance."""
partition_id = self.computer_partition.getId()
try:
supervisor = self.getSupervisorRPC()
supervisor.stopProcessGroup(partition_id, False)
with self.getSupervisorRPC() as supervisor:
supervisor.stopProcessGroup(partition_id, False)
except xmlrpclib.Fault as exc:
if exc.faultString.startswith('BAD_NAME:'):
self.logger.info('Partition %s not known in supervisord, ignoring' % partition_id)
else:
raise
else:
self.logger.info("Requested stop of %s..." % self.computer_partition.getId())
......@@ -796,15 +802,18 @@ class Partition(object):
def checkProcessesFromStateList(self, process_list, state_list):
"""Asks supervisord to check if one of the processes are in the state_list."""
supervisor = self.getSupervisorRPC()
for process in process_list:
try:
info = supervisor.getProcessInfo(process)
with self.getSupervisorRPC() as supervisor:
info = supervisor.getProcessInfo(process)
if info['statename'] in state_list:
return True
except xmlrpclib.Fault as exc:
self.logger.debug("BAD process name: %r" % process)
continue
if exc.faultString.startswith('BAD_NAME:'):
self.logger.debug("BAD process name: %r" % process)
continue
else:
raise
return False
def cleanupFolder(self, folder_path):
......@@ -830,43 +839,43 @@ class Partition(object):
# In future it will not be needed, as update command
# is going to be implemented on server side.
self.logger.debug('Updating supervisord')
supervisor = self.getSupervisorRPC()
# taken from supervisord.supervisorctl.do_update
result = supervisor.reloadConfig()
added, changed, removed = result[0]
for gname in removed:
results = supervisor.stopProcessGroup(gname)
fails = [res for res in results
if res['status'] == xmlrpc.Faults.FAILED]
if fails:
self.logger.warning('Problem while stopping process %r, will try later' % gname)
else:
with self.getSupervisorRPC() as supervisor:
# taken from supervisord.supervisorctl.do_update
result = supervisor.reloadConfig()
added, changed, removed = result[0]
for gname in removed:
results = supervisor.stopProcessGroup(gname)
fails = [res for res in results
if res['status'] == xmlrpc.Faults.FAILED]
if fails:
self.logger.warning('Problem while stopping process %r, will try later' % gname)
else:
self.logger.info('Stopped %r' % gname)
for i in range(0, 10):
# Some processes may still be running, be nice and wait for them to be stopped.
try:
supervisor.removeProcessGroup(gname)
break
except:
if i == 9:
raise
time.sleep(1)
self.logger.info('Removed %r' % gname)
for gname in changed:
results = supervisor.stopProcessGroup(gname)
self.logger.info('Stopped %r' % gname)
for i in range(0, 10):
# Some processes may still be running, be nice and wait for them to be stopped.
try:
supervisor.removeProcessGroup(gname)
break
except:
if i == 9:
raise
time.sleep(1)
self.logger.info('Removed %r' % gname)
for gname in changed:
results = supervisor.stopProcessGroup(gname)
self.logger.info('Stopped %r' % gname)
supervisor.removeProcessGroup(gname)
supervisor.addProcessGroup(gname)
self.logger.info('Updated %r' % gname)
for gname in added:
supervisor.addProcessGroup(gname)
self.logger.info('Updated %r' % gname)
self.logger.debug('Supervisord updated')
supervisor.removeProcessGroup(gname)
supervisor.addProcessGroup(gname)
self.logger.info('Updated %r' % gname)
for gname in added:
supervisor.addProcessGroup(gname)
self.logger.info('Updated %r' % gname)
self.logger.debug('Supervisord updated')
def _set_ownership(self, path):
"""
......
......@@ -995,7 +995,8 @@ stderr_logfile_backups=1
periodicity_path = os.path.join(software_path, 'periodicity')
if os.path.exists(periodicity_path):
try:
periodicity = int(open(periodicity_path).read())
with open(periodicity_path) as f:
periodicity = int(f.read())
except ValueError:
os.remove(periodicity_path)
self.logger.exception('')
......@@ -1031,7 +1032,8 @@ stderr_logfile_backups=1
# changed).
if (computer_partition_id not in self.computer_partition_filter_list and
not self.develop and os.path.exists(timestamp_path)):
old_timestamp = open(timestamp_path).read()
with open(timestamp_path) as f:
old_timestamp = f.read()
last_runtime = int(os.path.getmtime(timestamp_path))
if timestamp:
try:
......@@ -1064,9 +1066,7 @@ stderr_logfile_backups=1
# Include Partition Logging
log_folder_path = "%s/.slapgrid/log" % instance_path
mkdir_p(log_folder_path)
partition_file_handler = logging.FileHandler(
filename="%s/instance.log" % (log_folder_path)
)
stat_info = os.stat(instance_path)
chownDirectory("%s/.slapgrid" % instance_path,
uid=stat_info.st_uid,
......@@ -1074,9 +1074,13 @@ stderr_logfile_backups=1
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(name)s %(message)s')
# this partition_file_handler will be cleaned up after this try: block
partition_file_handler = logging.FileHandler(
filename="%s/instance.log" % (log_folder_path)
)
partition_file_handler.setFormatter(formatter)
self.logger.addHandler(partition_file_handler)
try:
self.logger.info('Processing Computer Partition %s.' % computer_partition_id)
self.logger.info(' Software URL: %s' % software_url)
......@@ -1153,8 +1157,9 @@ stderr_logfile_backups=1
# updating promises state, no need to raise here
pass
raise e
else:
finally:
self.logger.removeHandler(partition_file_handler)
partition_file_handler.close()
# Run manager tear down
for manager in self._manager_list:
......@@ -1162,7 +1167,8 @@ stderr_logfile_backups=1
# If partition has been successfully processed, write timestamp
if timestamp:
open(timestamp_path, 'w').write(timestamp)
with open(timestamp_path, 'w') as f:
f.write(timestamp)
def FilterComputerPartitionList(self, computer_partition_list):
"""
......@@ -1492,7 +1498,8 @@ stderr_logfile_backups=1
file_path = os.path.join(dir_reports, filename)
if os.path.exists(file_path):
usage = open(file_path, 'r').read()
with open(file_path, 'r') as f:
usage = f.read()
# We check the validity of xml content of each reports
if not self.validateXML(usage, partition_consumption_model):
......@@ -1538,7 +1545,8 @@ stderr_logfile_backups=1
file_path = os.path.join(computer_report_dir, filename)
if os.path.exists(file_path):
usage = open(file_path, 'r').read()
with open(file_path, 'r') as f:
usage = f.read()
if self.validateXML(usage, computer_consumption_model):
self.logger.info('XML file generated by asXML is valid')
......
......@@ -36,6 +36,7 @@ import stat
import sys
import time
from six.moves import xmlrpc_client as xmlrpclib
import contextlib
from slapos.grid.utils import (createPrivateDirectory, SlapPopen, updateFile)
from slapos.util import bytes2str
......@@ -43,13 +44,25 @@ from slapos.util import bytes2str
from supervisor import xmlrpc, states
@contextlib.contextmanager
def getSupervisorRPC(socket):
"""Get a supervisor XML-RPC connection.
Use in a context manager for proper closing of sockets.
"""
supervisor_transport = xmlrpc.SupervisorTransport('', '',
'unix://' + socket)
server_proxy = xmlrpclib.ServerProxy('http://127.0.0.1',
supervisor_transport)
return getattr(server_proxy, 'supervisor')
# python3's xmlrpc is a closing context manager, python2 is not and cannot be
# just used as a context manager as it would call __enter__ and __exit__ on
# XML-RPC.
if sys.version_info.major == 2:
yield server_proxy.supervisor
else:
with server_proxy as s:
yield s.supervisor
def _getSupervisordSocketPath(instance_root):
return os.path.join(instance_root, 'supervisord.socket')
......@@ -116,13 +129,13 @@ def _updateWatchdog(socket):
Then, when running slapgrid, the real watchdog configuration is generated.
We thus need to reload watchdog configuration if needed and start it.
"""
supervisor = getSupervisorRPC(socket)
if supervisor.getProcessInfo('watchdog')['state'] not in states.RUNNING_STATES:
# XXX workaround for https://github.com/Supervisor/supervisor/issues/339
# In theory, only reloadConfig is needed.
supervisor.removeProcessGroup('watchdog')
supervisor.reloadConfig()
supervisor.addProcessGroup('watchdog')
with getSupervisorRPC(socket) as supervisor:
if supervisor.getProcessInfo('watchdog')['state'] not in states.RUNNING_STATES:
# XXX workaround for https://github.com/Supervisor/supervisor/issues/339
# In theory, only reloadConfig is needed.
supervisor.removeProcessGroup('watchdog')
supervisor.reloadConfig()
supervisor.addProcessGroup('watchdog')
def launchSupervisord(instance_root, logger,
supervisord_additional_argument_list=None):
......@@ -132,13 +145,15 @@ def launchSupervisord(instance_root, logger,
trynum = 1
while trynum < 6:
try:
supervisor = getSupervisorRPC(socket)
status = supervisor.getState()
with getSupervisorRPC(socket) as supervisor:
status = supervisor.getState()
except xmlrpclib.Fault as e:
if e.faultCode == 6 and e.faultString == 'SHUTDOWN_STATE':
logger.info('Supervisor in shutdown procedure, will check again later.')
trynum += 1
time.sleep(2 * trynum)
else:
raise
except Exception:
# In case if there is problem with connection, assume that supervisord
# is not running and try to run it
......@@ -187,8 +202,8 @@ def launchSupervisord(instance_root, logger,
while trynum < 6:
try:
socketlib.setdefaulttimeout(current_timeout)
supervisor = getSupervisorRPC(socket)
status = supervisor.getState()
with getSupervisorRPC(socket) as supervisor:
status = supervisor.getState()
if status['statename'] == 'RUNNING' and status['statecode'] == 1:
return
logger.warning('Wrong status name %(statename)r and code '
......
......@@ -312,7 +312,9 @@ def launchBuildout(path, buildout_binary, logger,
uid = stat_info.st_uid
gid = stat_info.st_gid
# Extract python binary to prevent shebang size limit
line = open(buildout_binary, 'r').readline()
with open(buildout_binary, 'r') as f:
line = f.readline()
invocation_list = []
if line.startswith('#!'):
line = line[2:]
......
......@@ -143,8 +143,10 @@ class Watchdog(object):
# Partition never managed to deploy successfully, ignore bang
return True
last_bang_timestamp = int(open(slapos_last_bang_timestamp_file_path, 'r').read())
deployment_timestamp = int(open(partition_timestamp_file_path, 'r').read())
with open(slapos_last_bang_timestamp_file_path, 'r') as f:
last_bang_timestamp = int(f.read())
with open(partition_timestamp_file_path, 'r') as f:
deployment_timestamp = int(f.read())
if deployment_timestamp > last_bang_timestamp:
# It previously banged BEFORE latest successful deployment
# i.e it haven't banged since last successful deployment
......@@ -175,10 +177,12 @@ class Watchdog(object):
COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME
)
if os.path.exists(partition_timestamp_file_path):
timestamp = open(partition_timestamp_file_path, 'r').read()
with open(partition_timestamp_file_path, 'r') as f:
timestamp = f.read()
else:
timestamp = '0'
open(slapos_last_bang_timestamp_file_path, 'w').write(timestamp)
with open(slapos_last_bang_timestamp_file_path, 'w') as f:
f.write(timestamp)
def handle_process_state_change_event(self, headers, payload_dict):
partition_id = payload_dict['groupname']
......
......@@ -167,13 +167,13 @@ class Manager(object):
partition.writeSupervisorConfigurationFile()
# Start processes
supervisord = partition.getSupervisorRPC()
for program in socat_programs:
process_name = '{}:{}'.format(group_id, program['name'])
status = supervisord.getProcessInfo(process_name)
with partition.getSupervisorRPC() as supervisor:
for program in socat_programs:
process_name = '{}:{}'.format(group_id, program['name'])
status = supervisor.getProcessInfo(process_name)
if status['start'] == 0:
supervisord.startProcess(process_name, False)
if status['start'] == 0:
supervisor.startProcess(process_name, False)
def report(self, partition):
"""Method called at `slapos node report` phase.
......
......@@ -80,16 +80,16 @@ class Manager(object):
partition.writeSupervisorConfigurationFile()
# check the state of all processes; if a process is not started yet, start it
supervisord = partition.getSupervisorRPC()
process_list_string = ""
for name in wrapper_list:
process_name = '-'.join([partition_id, group_suffix]) + ':' + name
process_list_string += '%s\n' % process_name
status = supervisord.getProcessInfo(process_name)
if status['start'] == 0:
# process is not started yet
logger.info("Starting pre-delete process %r..." % name)
supervisord.startProcess(process_name, False)
with partition.getSupervisorRPC() as supervisor:
process_list_string = ""
for name in wrapper_list:
process_name = '-'.join([partition_id, group_suffix]) + ':' + name
process_list_string += '%s\n' % process_name
status = supervisor.getProcessInfo(process_name)
if status['start'] == 0:
# process is not started yet
logger.info("Starting pre-delete process %r..." % name)
supervisor.startProcess(process_name, False)
# ask to slapgrid to check theses scripts before destroy partition
with open(wait_filepath, 'w') as f:
......
......@@ -185,7 +185,8 @@ class TestCliProxyShow(CliMixin):
self.assertEqual('', stdout.getvalue())
self.assertEqual('', stderr.getvalue())
# our pager was set to output to this temporary file
self.assertIn('287375f0cba269902ba1bc50242839d7', open(tmp.name, 'r').read())
with open(tmp.name, 'r') as f:
self.assertIn('287375f0cba269902ba1bc50242839d7', f.read())
class TestCliNode(CliMixin):
......
......@@ -646,8 +646,10 @@ class RunPromise(GenericPromise):
self.assertTrue(os.path.exists(first_state_file))
self.assertTrue(os.path.exists(second_state_file))
first_result = json.load(open(first_state_file))
second_result = json.load(open(second_state_file))
with open(first_state_file) as f:
first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertEqual(first_result['name'], first_promise)
self.assertEqual(second_result['name'], second_promise)
first_date = first_result['result']['date']
......@@ -659,8 +661,10 @@ class RunPromise(GenericPromise):
self.launcher.run() # only my_first_promise will run but second_promise still failing
self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
first_result = json.load(open(first_state_file))
second_result = json.load(open(second_state_file))
with open(first_state_file) as f:
first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date)
self.assertEqual(second_result['result']['date'], second_date)
first_date = first_result['result']['date']
......@@ -671,8 +675,10 @@ class RunPromise(GenericPromise):
self.launcher.run()
self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
first_result = json.load(open(first_state_file))
second_result = json.load(open(second_state_file))
with open(first_state_file) as f:
first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date)
self.assertNotEqual(second_result['result']['date'], second_date)
......@@ -696,8 +702,10 @@ class RunPromise(GenericPromise):
self.assertTrue(os.path.exists(first_state_file))
self.assertTrue(os.path.exists(second_state_file))
first_result = json.load(open(first_state_file))
second_result = json.load(open(second_state_file))
with open(first_state_file) as f:
first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertEqual(first_result['name'], first_promise)
self.assertEqual(second_result['name'], second_promise)
first_date = first_result['result']['date']
......@@ -709,8 +717,10 @@ class RunPromise(GenericPromise):
self.launcher.run() # only my_first_promise will run but second_promise still failing
self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
first_result = json.load(open(first_state_file))
second_result = json.load(open(second_state_file))
with open(first_state_file) as f:
first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date)
self.assertEqual(second_result['result']['date'], second_date)
first_date = first_result['result']['date']
......@@ -725,8 +735,10 @@ class RunPromise(GenericPromise):
self.configureLauncher()
self.launcher.run() # now all succeed
first_result = json.load(open(first_state_file))
second_result = json.load(open(second_state_file))
with open(first_state_file) as f:
first_result = json.load(f)
with open(second_state_file) as f:
second_result = json.load(f)
self.assertNotEqual(first_result['result']['date'], first_date)
self.assertNotEqual(second_result['result']['date'], second_date)
......
......@@ -176,8 +176,9 @@ class BasicMixin(object):
def assertLogContent(self, log_path, expected, tries=600):
for i in range(tries):
if expected in open(log_path).read():
return
with open(log_path) as f:
if expected in f.read():
return
time.sleep(0.1)
self.fail('%r not found in %s' % (expected, log_path))
......@@ -205,7 +206,8 @@ class BasicMixin(object):
svc = os.path.join(self.instance_root, 'var', 'run', 'supervisord.pid')
if os.path.exists(svc):
try:
pid = int(open(svc).read().strip())
with open(svc) as f:
pid = int(f.read().strip())
except ValueError:
pass
else:
......@@ -483,7 +485,8 @@ class InstanceForTest(object):
if not os.path.isdir(promise_path):
os.makedirs(promise_path)
promise = os.path.join(promise_path, promise_name)
open(promise, 'w').write(promise_content)
with open(promise, 'w') as f:
f.write(promise_content)
os.chmod(promise, 0o777)
def setCertificate(self, certificate_repository_path):
......@@ -492,11 +495,13 @@ class InstanceForTest(object):
self.cert_file = os.path.join(certificate_repository_path,
"%s.crt" % self.name)
self.certificate = str(random.random())
open(self.cert_file, 'w').write(self.certificate)
with open(self.cert_file, 'w') as f:
f.write(self.certificate)
self.key_file = os.path.join(certificate_repository_path,
'%s.key' % self.name)
self.key = str(random.random())
open(self.key_file, 'w').write(self.key)
with open(self.key_file, 'w') as f:
f.write(self.key)
class SoftwareForTest(object):
......@@ -532,22 +537,24 @@ class SoftwareForTest(object):
"""
Set template.cfg
"""
open(os.path.join(self.srdir, 'template.cfg'), 'w').write(template)
with open(os.path.join(self.srdir, 'template.cfg'), 'w') as f:
f.write(template)
def setBuildout(self, buildout="""#!/bin/sh
touch worked"""):
"""
Set a buildout exec in bin
"""
open(os.path.join(self.srbindir, 'buildout'), 'w').write(buildout)
with open(os.path.join(self.srbindir, 'buildout'), 'w') as f:
f.write(buildout)
os.chmod(os.path.join(self.srbindir, 'buildout'), 0o755)
def setPeriodicity(self, periodicity):
"""
Set a periodicity file
"""
with open(os.path.join(self.srdir, 'periodicity'), 'w') as fout:
fout.write(str(periodicity))
with open(os.path.join(self.srdir, 'periodicity'), 'w') as f:
f.write(str(periodicity))
@implementer(IManager)
......@@ -873,11 +880,12 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
# Prepare watchdog
self.watchdog_banged = os.path.join(self._tempdir, 'watchdog_banged')
watchdog_path = os.path.join(self._tempdir, 'watchdog')
open(watchdog_path, 'w').write(WATCHDOG_TEMPLATE.format(
python_path=sys.executable,
sys_path=sys.path,
watchdog_banged=self.watchdog_banged
))
with open(watchdog_path, 'w') as f:
f.write(WATCHDOG_TEMPLATE.format(
python_path=sys.executable,
sys_path=sys.path,
watchdog_banged=self.watchdog_banged
))
os.chmod(watchdog_path, 0o755)
self.grid.watchdog_path = watchdog_path
slapos.grid.slapgrid.WATCHDOG_PATH = watchdog_path
......@@ -908,7 +916,8 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
daemon_log = os.path.join(partition.partition_path, '.0_daemon.log')
self.assertLogContent(daemon_log, 'Failing')
self.assertIsCreated(self.watchdog_banged)
self.assertIn('daemon', open(self.watchdog_banged).read())
with open(self.watchdog_banged) as f:
self.assertIn('daemon', f.read())
def test_one_failing_daemon_in_run_will_not_bang_with_watchdog(self):
"""
......@@ -1059,8 +1068,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertEqual(f.read(), timestamp_content)
def test_watchdog_ignore_bang_if_partition_not_deployed(self):
"""
......@@ -1092,8 +1103,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertNotEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertNotEqual(f.read(), timestamp_content)
def test_watchdog_bang_only_once_if_partition_never_deployed(self):
"""
......@@ -1179,7 +1192,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertEqual(f.read(), timestamp_content)
# Second bang
event = watchdog.process_state_events[0]
......@@ -1207,7 +1223,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
watchdog.handle_event(headers, payload)
self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
with open(os.path.join(
partition,
slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
self.assertEqual(f.read(), timestamp_content)
# Fourth bang
event = watchdog.process_state_events[0]
......@@ -1237,7 +1256,8 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
timestamp_path = os.path.join(instance.partition_path, '.timestamp')
self.setSlapgrid()
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertIn(timestamp, open(timestamp_path).read())
with open(timestamp_path) as f:
self.assertIn(timestamp, f.read())
self.assertEqual(instance.sequence,
['/stoppedComputerPartition'])
......
......@@ -459,14 +459,16 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
software = self.createSoftware()
partition = self.createPartition(software.url, retention_delay=delay)
partition.install()
deployed_delay = int(open(partition.retention_lock_delay_file_path).read())
with open(partition.retention_lock_delay_file_path) as f:
deployed_delay = int(f.read())
self.assertEqual(delay, deployed_delay)
def test_no_retention_lock_delay(self):
software = self.createSoftware()
partition = self.createPartition(software.url)
partition.install()
delay = open(partition.retention_lock_delay_file_path).read()
with open(partition.retention_lock_delay_file_path) as f:
delay = f.read()
self.assertTrue(delay, '0')
self.assertTrue(partition.destroy())
......@@ -485,7 +487,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition.install()
partition.destroy()
deployed_delay = int(open(partition.retention_lock_delay_file_path).read())
with open(partition.retention_lock_delay_file_path) as f:
deployed_delay = int(f.read())
self.assertEqual(delay, deployed_delay)
def test_retention_lock_delay_is_respected(self):
......@@ -494,7 +497,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition = self.createPartition(software.url, retention_delay=delay)
partition.install()
deployed_delay = float(open(partition.retention_lock_delay_file_path).read())
with open(partition.retention_lock_delay_file_path) as f:
deployed_delay = float(f.read())
self.assertEqual(int(delay), int(deployed_delay))
self.assertFalse(partition.destroy())
......@@ -510,7 +514,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition.install()
self.assertFalse(os.path.exists(partition.retention_lock_date_file_path))
partition.destroy()
deployed_date = float(open(partition.retention_lock_date_file_path).read())
with open(partition.retention_lock_date_file_path) as f:
deployed_date = float(f.read())
self.assertEqual(delay * 3600 * 24 + int(time.time()), int(deployed_date))
def test_retention_lock_date_does_not_change(self):
......@@ -529,5 +534,6 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
partition.install()
partition.destroy()
deployed_date = float(open(partition.retention_lock_date_file_path).read())
with open(partition.retention_lock_date_file_path) as f:
deployed_date = float(f.read())
self.assertEqual(delay * 3600 * 24 + int(now), int(deployed_date))
......@@ -1142,7 +1142,8 @@ database_uri = %(tempdir)s/lib/external_proxy.db
'external_proxy_host': self.external_proxy_host,
'external_proxy_port': self.external_proxy_port
}
open(self.slapos_cfg, 'w').write(configuration)
with open(self.slapos_cfg, 'w') as f:
f.write(configuration)
def external_proxy_add_free_partition(self, partition_amount, computer_id=None):
"""
......