Commit 0998e7f1 authored by Jérome Perrin

*: explicitly close file descriptors

They will be closed in __del__, probably soon enough, but it's cleaner
to close them explicitly. This also stops the ResourceWarning on Python 3.
parent 812d00c4
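For context, a minimal illustrative sketch of the pattern being replaced (not part of the commit; the path below is made up): a bare open(...).read() leaves the file object to be closed whenever it happens to be garbage-collected, which Python 3 reports as a ResourceWarning when warnings are enabled, whereas a with block closes the descriptor deterministically.

import warnings

warnings.simplefilter('always', ResourceWarning)  # make the warning visible

path = '/tmp/example.txt'  # hypothetical file, for illustration only
with open(path, 'w') as f:
  f.write('hello\n')

# Old pattern: the descriptor stays open until the file object happens to be
# garbage-collected; CPython then emits "ResourceWarning: unclosed file ...".
content = open(path).read()

# New pattern applied throughout this commit: the descriptor is closed as
# soon as the block exits, so no warning is emitted.
with open(path) as f:
  content = f.read()

The diff below applies this with-statement pattern across the codebase.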
@@ -568,7 +568,8 @@ class Partition(object):
     shutil.copy(template_location, config_location)
     # fill generated buildout with additional information
-    buildout_text = open(config_location).read()
+    with open(config_location) as f:
+      buildout_text = f.read()
     buildout_text += '\n\n' + bytes2str(pkg_resources.resource_string(__name__,
       'templates/buildout-tail.cfg.in')) % {
         'computer_id': self.computer_id,
@@ -580,7 +581,8 @@ class Partition(object):
         'storage_home': self.instance_storage_home,
         'global_ipv4_network_prefix': self.ipv4_global_network,
     }
-    open(config_location, 'w').write(buildout_text)
+    with open(config_location, 'w') as f:
+      f.write(buildout_text)
     os.chmod(config_location, 0o640)
     # Try to find the best possible buildout:
     # *) if software_root/bin/bootstrap exists use this one to bootstrap
...
@@ -995,7 +995,8 @@ stderr_logfile_backups=1
     periodicity_path = os.path.join(software_path, 'periodicity')
     if os.path.exists(periodicity_path):
       try:
-        periodicity = int(open(periodicity_path).read())
+        with open(periodicity_path) as f:
+          periodicity = int(f.read())
       except ValueError:
         os.remove(periodicity_path)
         self.logger.exception('')
@@ -1031,7 +1032,8 @@ stderr_logfile_backups=1
     # changed).
     if (computer_partition_id not in self.computer_partition_filter_list and
         not self.develop and os.path.exists(timestamp_path)):
-      old_timestamp = open(timestamp_path).read()
+      with open(timestamp_path) as f:
+        old_timestamp = f.read()
       last_runtime = int(os.path.getmtime(timestamp_path))
       if timestamp:
         try:
@@ -1162,7 +1164,8 @@ stderr_logfile_backups=1
     # If partition has been successfully processed, write timestamp
     if timestamp:
-      open(timestamp_path, 'w').write(timestamp)
+      with open(timestamp_path, 'w') as f:
+        f.write(timestamp)

   def FilterComputerPartitionList(self, computer_partition_list):
     """
@@ -1492,7 +1495,8 @@ stderr_logfile_backups=1
         file_path = os.path.join(dir_reports, filename)
         if os.path.exists(file_path):
-          usage = open(file_path, 'r').read()
+          with open(file_path, 'r') as f:
+            usage = f.read()

           # We check the validity of xml content of each reports
           if not self.validateXML(usage, partition_consumption_model):
@@ -1538,7 +1542,8 @@ stderr_logfile_backups=1
         file_path = os.path.join(computer_report_dir, filename)
         if os.path.exists(file_path):
-          usage = open(file_path, 'r').read()
+          with open(file_path, 'r') as f:
+            usage = f.read()

           if self.validateXML(usage, computer_consumption_model):
             self.logger.info('XML file generated by asXML is valid')
...
@@ -312,7 +312,9 @@ def launchBuildout(path, buildout_binary, logger,
   uid = stat_info.st_uid
   gid = stat_info.st_gid
   # Extract python binary to prevent shebang size limit
-  line = open(buildout_binary, 'r').readline()
+  with open(buildout_binary, 'r') as f:
+    line = f.readline()
   invocation_list = []
   if line.startswith('#!'):
     line = line[2:]
...
@@ -143,8 +143,10 @@ class Watchdog(object):
       # Partition never managed to deploy successfully, ignore bang
       return True
-    last_bang_timestamp = int(open(slapos_last_bang_timestamp_file_path, 'r').read())
-    deployment_timestamp = int(open(partition_timestamp_file_path, 'r').read())
+    with open(slapos_last_bang_timestamp_file_path, 'r') as f:
+      last_bang_timestamp = int(f.read())
+    with open(partition_timestamp_file_path, 'r') as f:
+      deployment_timestamp = int(f.read())
     if deployment_timestamp > last_bang_timestamp:
       # It previously banged BEFORE latest successful deployment
       # i.e it haven't banged since last successful deployment
@@ -175,10 +177,12 @@ class Watchdog(object):
       COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME
     )
     if os.path.exists(partition_timestamp_file_path):
-      timestamp = open(partition_timestamp_file_path, 'r').read()
+      with open(partition_timestamp_file_path, 'r') as f:
+        timestamp = f.read()
     else:
       timestamp = '0'
-    open(slapos_last_bang_timestamp_file_path, 'w').write(timestamp)
+    with open(slapos_last_bang_timestamp_file_path, 'w') as f:
+      f.write(timestamp)

   def handle_process_state_change_event(self, headers, payload_dict):
     partition_id = payload_dict['groupname']
...
@@ -185,7 +185,8 @@ class TestCliProxyShow(CliMixin):
     self.assertEqual('', stdout.getvalue())
     self.assertEqual('', stderr.getvalue())
     # our pager was set to output to this temporary file
-    self.assertIn('287375f0cba269902ba1bc50242839d7', open(tmp.name, 'r').read())
+    with open(tmp.name, 'r') as f:
+      self.assertIn('287375f0cba269902ba1bc50242839d7', f.read())


 class TestCliNode(CliMixin):
...
@@ -646,8 +646,10 @@ class RunPromise(GenericPromise):
     self.assertTrue(os.path.exists(first_state_file))
     self.assertTrue(os.path.exists(second_state_file))
-    first_result = json.load(open(first_state_file))
-    second_result = json.load(open(second_state_file))
+    with open(first_state_file) as f:
+      first_result = json.load(f)
+    with open(second_state_file) as f:
+      second_result = json.load(f)
     self.assertEqual(first_result['name'], first_promise)
     self.assertEqual(second_result['name'], second_promise)
     first_date = first_result['result']['date']
@@ -659,8 +661,10 @@ class RunPromise(GenericPromise):
       self.launcher.run() # only my_first_promise will run but second_promise still failing
     self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
-    first_result = json.load(open(first_state_file))
-    second_result = json.load(open(second_state_file))
+    with open(first_state_file) as f:
+      first_result = json.load(f)
+    with open(second_state_file) as f:
+      second_result = json.load(f)
     self.assertNotEqual(first_result['result']['date'], first_date)
     self.assertEqual(second_result['result']['date'], second_date)
     first_date = first_result['result']['date']
@@ -671,8 +675,10 @@ class RunPromise(GenericPromise):
       self.launcher.run()
     self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
-    first_result = json.load(open(first_state_file))
-    second_result = json.load(open(second_state_file))
+    with open(first_state_file) as f:
+      first_result = json.load(f)
+    with open(second_state_file) as f:
+      second_result = json.load(f)
     self.assertNotEqual(first_result['result']['date'], first_date)
     self.assertNotEqual(second_result['result']['date'], second_date)
@@ -696,8 +702,10 @@ class RunPromise(GenericPromise):
     self.assertTrue(os.path.exists(first_state_file))
     self.assertTrue(os.path.exists(second_state_file))
-    first_result = json.load(open(first_state_file))
-    second_result = json.load(open(second_state_file))
+    with open(first_state_file) as f:
+      first_result = json.load(f)
+    with open(second_state_file) as f:
+      second_result = json.load(f)
     self.assertEqual(first_result['name'], first_promise)
     self.assertEqual(second_result['name'], second_promise)
     first_date = first_result['result']['date']
@@ -709,8 +717,10 @@ class RunPromise(GenericPromise):
       self.launcher.run() # only my_first_promise will run but second_promise still failing
     self.assertEqual(str(exc.exception), 'Promise %r failed.' % second_promise)
-    first_result = json.load(open(first_state_file))
-    second_result = json.load(open(second_state_file))
+    with open(first_state_file) as f:
+      first_result = json.load(f)
+    with open(second_state_file) as f:
+      second_result = json.load(f)
     self.assertNotEqual(first_result['result']['date'], first_date)
     self.assertEqual(second_result['result']['date'], second_date)
     first_date = first_result['result']['date']
@@ -725,8 +735,10 @@ class RunPromise(GenericPromise):
     self.configureLauncher()
     self.launcher.run() # now all succeed
-    first_result = json.load(open(first_state_file))
-    second_result = json.load(open(second_state_file))
+    with open(first_state_file) as f:
+      first_result = json.load(f)
+    with open(second_state_file) as f:
+      second_result = json.load(f)
     self.assertNotEqual(first_result['result']['date'], first_date)
     self.assertNotEqual(second_result['result']['date'], second_date)
...
@@ -176,7 +176,8 @@ class BasicMixin(object):
   def assertLogContent(self, log_path, expected, tries=600):
     for i in range(tries):
-      if expected in open(log_path).read():
-        return
+      with open(log_path) as f:
+        if expected in f.read():
+          return
       time.sleep(0.1)
     self.fail('%r not found in %s' % (expected, log_path))
@@ -205,7 +206,8 @@ class BasicMixin(object):
     svc = os.path.join(self.instance_root, 'var', 'run', 'supervisord.pid')
     if os.path.exists(svc):
       try:
-        pid = int(open(svc).read().strip())
+        with open(svc) as f:
+          pid = int(f.read().strip())
       except ValueError:
         pass
       else:
@@ -483,7 +485,8 @@ class InstanceForTest(object):
     if not os.path.isdir(promise_path):
       os.makedirs(promise_path)
     promise = os.path.join(promise_path, promise_name)
-    open(promise, 'w').write(promise_content)
+    with open(promise, 'w') as f:
+      f.write(promise_content)
     os.chmod(promise, 0o777)

   def setCertificate(self, certificate_repository_path):
@@ -492,11 +495,13 @@ class InstanceForTest(object):
       self.cert_file = os.path.join(certificate_repository_path,
                                     "%s.crt" % self.name)
       self.certificate = str(random.random())
-      open(self.cert_file, 'w').write(self.certificate)
+      with open(self.cert_file, 'w') as f:
+        f.write(self.certificate)
       self.key_file = os.path.join(certificate_repository_path,
                                    '%s.key' % self.name)
       self.key = str(random.random())
-      open(self.key_file, 'w').write(self.key)
+      with open(self.key_file, 'w') as f:
+        f.write(self.key)


 class SoftwareForTest(object):
@@ -532,22 +537,24 @@ class SoftwareForTest(object):
     """
     Set template.cfg
     """
-    open(os.path.join(self.srdir, 'template.cfg'), 'w').write(template)
+    with open(os.path.join(self.srdir, 'template.cfg'), 'w') as f:
+      f.write(template)

   def setBuildout(self, buildout="""#!/bin/sh
 touch worked"""):
     """
     Set a buildout exec in bin
     """
-    open(os.path.join(self.srbindir, 'buildout'), 'w').write(buildout)
+    with open(os.path.join(self.srbindir, 'buildout'), 'w') as f:
+      f.write(buildout)
     os.chmod(os.path.join(self.srbindir, 'buildout'), 0o755)

   def setPeriodicity(self, periodicity):
     """
     Set a periodicity file
     """
-    with open(os.path.join(self.srdir, 'periodicity'), 'w') as fout:
-      fout.write(str(periodicity))
+    with open(os.path.join(self.srdir, 'periodicity'), 'w') as f:
+      f.write(str(periodicity))


 @implementer(IManager)
@@ -873,7 +880,8 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
     # Prepare watchdog
     self.watchdog_banged = os.path.join(self._tempdir, 'watchdog_banged')
     watchdog_path = os.path.join(self._tempdir, 'watchdog')
-    open(watchdog_path, 'w').write(WATCHDOG_TEMPLATE.format(
+    with open(watchdog_path, 'w') as f:
+      f.write(WATCHDOG_TEMPLATE.format(
         python_path=sys.executable,
         sys_path=sys.path,
         watchdog_banged=self.watchdog_banged
@@ -908,7 +916,8 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
     daemon_log = os.path.join(partition.partition_path, '.0_daemon.log')
     self.assertLogContent(daemon_log, 'Failing')
     self.assertIsCreated(self.watchdog_banged)
-    self.assertIn('daemon', open(self.watchdog_banged).read())
+    with open(self.watchdog_banged) as f:
+      self.assertIn('daemon', f.read())

   def test_one_failing_daemon_in_run_will_not_bang_with_watchdog(self):
     """
@@ -1059,8 +1068,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
     watchdog.handle_event(headers, payload)
     self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
-    self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
+    with open(os.path.join(
+        partition,
+        slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
+      self.assertEqual(f.read(), timestamp_content)

   def test_watchdog_ignore_bang_if_partition_not_deployed(self):
     """
@@ -1092,8 +1103,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
     watchdog.handle_event(headers, payload)
     self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
-    self.assertNotEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
+    with open(os.path.join(
+        partition,
+        slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
+      self.assertNotEqual(f.read(), timestamp_content)

   def test_watchdog_bang_only_once_if_partition_never_deployed(self):
     """
@@ -1179,7 +1192,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
     watchdog.handle_event(headers, payload)
     self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
-    self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
+    with open(os.path.join(
+        partition,
+        slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
+      self.assertEqual(f.read(), timestamp_content)

     # Second bang
     event = watchdog.process_state_events[0]
@@ -1207,7 +1223,10 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
     watchdog.handle_event(headers, payload)
     self.assertEqual(instance.sequence, ['/softwareInstanceBang'])
-    self.assertEqual(open(os.path.join(partition, slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)).read(), timestamp_content)
+    with open(os.path.join(
+        partition,
+        slapos.grid.slapgrid.COMPUTER_PARTITION_LATEST_BANG_TIMESTAMP_FILENAME)) as f:
+      self.assertEqual(f.read(), timestamp_content)

     # Fourth bang
     event = watchdog.process_state_events[0]
@@ -1237,7 +1256,8 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
     timestamp_path = os.path.join(instance.partition_path, '.timestamp')
     self.setSlapgrid()
     self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
-    self.assertIn(timestamp, open(timestamp_path).read())
+    with open(timestamp_path) as f:
+      self.assertIn(timestamp, f.read())
     self.assertEqual(instance.sequence,
                      ['/stoppedComputerPartition'])
...
@@ -459,14 +459,16 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
     software = self.createSoftware()
     partition = self.createPartition(software.url, retention_delay=delay)
     partition.install()
-    deployed_delay = int(open(partition.retention_lock_delay_file_path).read())
+    with open(partition.retention_lock_delay_file_path) as f:
+      deployed_delay = int(f.read())
     self.assertEqual(delay, deployed_delay)

   def test_no_retention_lock_delay(self):
     software = self.createSoftware()
     partition = self.createPartition(software.url)
     partition.install()
-    delay = open(partition.retention_lock_delay_file_path).read()
+    with open(partition.retention_lock_delay_file_path) as f:
+      delay = f.read()
     self.assertTrue(delay, '0')
     self.assertTrue(partition.destroy())
@@ -485,7 +487,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
     partition.install()
     partition.destroy()
-    deployed_delay = int(open(partition.retention_lock_delay_file_path).read())
+    with open(partition.retention_lock_delay_file_path) as f:
+      deployed_delay = int(f.read())
     self.assertEqual(delay, deployed_delay)

   def test_retention_lock_delay_is_respected(self):
@@ -494,7 +497,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
     partition = self.createPartition(software.url, retention_delay=delay)
     partition.install()
-    deployed_delay = float(open(partition.retention_lock_delay_file_path).read())
+    with open(partition.retention_lock_delay_file_path) as f:
+      deployed_delay = float(f.read())
     self.assertEqual(int(delay), int(deployed_delay))
     self.assertFalse(partition.destroy())
@@ -510,7 +514,8 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
     partition.install()
     self.assertFalse(os.path.exists(partition.retention_lock_date_file_path))
     partition.destroy()
-    deployed_date = float(open(partition.retention_lock_date_file_path).read())
+    with open(partition.retention_lock_date_file_path) as f:
+      deployed_date = float(f.read())
     self.assertEqual(delay * 3600 * 24 + int(time.time()), int(deployed_date))

   def test_retention_lock_date_does_not_change(self):
@@ -529,5 +534,6 @@ class TestPartitionDestructionLock(MasterMixin, unittest.TestCase):
     partition.install()
     partition.destroy()
-    deployed_date = float(open(partition.retention_lock_date_file_path).read())
+    with open(partition.retention_lock_date_file_path) as f:
+      deployed_date = float(f.read())
     self.assertEqual(delay * 3600 * 24 + int(now), int(deployed_date))
@@ -1142,7 +1142,8 @@ database_uri = %(tempdir)s/lib/external_proxy.db
       'external_proxy_host': self.external_proxy_host,
       'external_proxy_port': self.external_proxy_port
     }
-    open(self.slapos_cfg, 'w').write(configuration)
+    with open(self.slapos_cfg, 'w') as f:
+      f.write(configuration)

   def external_proxy_add_free_partition(self, partition_amount, computer_id=None):
     """
...