Commit 541da74e authored by Marius Gedminas

Use with open(...) for peace of mind

Fixes some more of those ResourceWarnings
parent 2daccd5c
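
Every hunk below makes the same mechanical change: a bare open()/close() pair (or an open() that is never closed at all) becomes a with block, so the file is closed deterministically instead of whenever the garbage collector finalizes it, which is what Python 3 reports as a ResourceWarning. A minimal before/after sketch of the pattern (the function and path names are illustrative, not taken from this patch):

    import warnings

    # Escalate leak warnings so a test run cannot miss them.
    # (ResourceWarning exists on Python 3 only.)
    warnings.simplefilter("error", ResourceWarning)

    def read_version_unsafe(path):
        f = open(path)
        data = f.read()  # if this raises, the close() below never runs
        f.close()
        return data

    def read_version_safe(path):
        # The context manager closes the file on the way out, even when
        # the body raises, so no ResourceWarning can be emitted for it.
        with open(path) as f:
            return f.read()
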
@@ -53,9 +53,8 @@ def replace(filename, pat, repl):
# e.g., ZEO 2.2.7 shipped with ZODB 3.2.7. Now ZEO and ZODB share their
# version number.
def write_zeoversion(path, version):
f = open(path, "w")
with open(path, "w") as f:
print >> f, version
f.close()
def main(args):
version, date = args
@@ -213,13 +213,13 @@ DemoStorage supports Blobs if the changes database supports blobs.
>>> db = DB(storage)
>>> conn = db.open()
>>> conn.root()['blob'].open().read()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 1'
>>> _ = transaction.begin()
>>> with conn.root()['blob'].open('w') as file:
... _ = file.write(b'state 2')
>>> transaction.commit()
>>> conn.root()['blob'].open().read()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 2'
>>> storage.temporaryDirectory() == changes.temporaryDirectory()
@@ -234,14 +234,14 @@ It isn't necessary for the base database to support blobs.
>>> storage = DemoStorage(base=base, changes=changes)
>>> db = DB(storage)
>>> conn = db.open()
>>> conn.root()['blob'].open().read()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 2'
>>> _ = transaction.begin()
>>> conn.root()['blob2'] = ZODB.blob.Blob()
>>> with conn.root()['blob2'].open('w') as file:
... _ = file.write(b'state 1')
>>> conn.root()['blob2'].open().read()
>>> with conn.root()['blob2'].open() as fp: fp.read()
'state 1'
>>> db.close()
@@ -258,7 +258,7 @@ storage wrapped around it when necessary:
>>> db = DB(storage)
>>> conn = db.open()
>>> conn.root()['blob'].open().read()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 1'
>>> type(storage.changes).__name__
@@ -268,7 +268,7 @@ storage wrapped around it when necessary:
>>> with conn.root()['blob'].open('w') as file:
... _ = file.write(b'state 2')
>>> transaction.commit()
>>> conn.root()['blob'].open().read()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 2'
>>> storage.temporaryDirectory() == storage.changes.temporaryDirectory()
@@ -139,7 +139,7 @@ def pack_with_repeated_blob_records():
>>> db.pack()
>>> conn.sync()
>>> conn.root()[1].open().read()
>>> with conn.root()[1].open() as fp: fp.read()
'some data'
>>> db.close()
@@ -855,11 +855,9 @@ class BlobStorage(BlobStorageMixin):
data, serial_before, serial_after = load_result
orig_fn = self.fshelper.getBlobFilename(oid, serial_before)
new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
orig = open(orig_fn, "rb")
new = open(new_fn, "wb")
with open(orig_fn, "rb") as orig:
with open(new_fn, "wb") as new:
utils.cp(orig, new)
orig.close()
new.close()
self.dirty_oids.append((oid, undo_serial))
finally:
@@ -890,13 +888,9 @@ def rename_or_copy_blob(f1, f2, chmod=True):
os.rename(f1, f2)
except OSError:
copied("Copied blob file %r to %r.", f1, f2)
file1 = open(f1, 'rb')
file2 = open(f2, 'wb')
try:
with open(f1, 'rb') as file1:
with open(f2, 'wb') as file2:
utils.cp(file1, file2)
finally:
file1.close()
file2.close()
remove_committed(f1)
if chmod:
os.chmod(f2, stat.S_IREAD)
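
Both hunks above stack two nested with statements to manage a pair of files. On Python 2.7 and 3.1 or later the same thing can be written as a single with statement with two context managers; the nested form presumably keeps the change compatible with older interpreters. A sketch of the equivalent one-liner, with shutil.copyfileobj from the stdlib standing in for ZODB's utils.cp (the file names are illustrative):

    import shutil

    # One with statement, two context managers: both files are closed
    # in reverse order when the block exits, whether or not it raises.
    with open("src.blob", "rb") as src, open("dst.blob", "wb") as dst:
        shutil.copyfileobj(src, dst)
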
@@ -220,11 +220,12 @@ def mailfolder(app, mboxname, number):
def VmSize():
try:
f = open('/proc/%s/status' % os.getpid())
with open('/proc/%s/status' % os.getpid()) as f:
lines = f.readlines()
except:
return 0
else:
l = list(filter(lambda l: l[:7] == 'VmSize:', f.readlines()))
l = list(filter(lambda l: l[:7] == 'VmSize:', lines))
if l:
l = l[0][7:].strip().split()[0]
return int(l)
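
This hunk also has to hoist the readlines() call into the try block: once the with statement exits, the file is closed, so the else branch can no longer read from f and works on the saved lines instead. A small sketch of the failure mode it avoids (the Linux-only /proc path mirrors the original):

    with open("/proc/self/status") as f:
        lines = f.readlines()  # read while the file is still open

    # f is closed here; calling f.readlines() at this point would raise
    # "ValueError: I/O operation on closed file".
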
@@ -699,7 +700,8 @@ def collect_options(args, jobs, options):
if name == 'options':
fname = args.pop(0)
d = {}
exec(compile(open(fname).read(), fname, 'exec'), d)
with open(fname) as fp:
exec(compile(fp.read(), fname, 'exec'), d)
collect_options(list(d['options']), jobs, options)
elif name in options:
v = args.pop(0)
@@ -25,9 +25,7 @@ from ZODB.utils import U64, p64
from transaction import Transaction
import sys
import itertools
import ZODB.blob
import six
try:
from itertools import izip as zip
@@ -234,7 +232,9 @@ class IteratorDeepCompare:
else:
fn2 = storage2.loadBlob(rec1.oid, rec1.tid)
self.assertTrue(fn1 != fn2)
eq(open(fn1, 'rb').read(), open(fn2, 'rb').read())
with open(fn1, 'rb') as fp1:
with open(fn2, 'rb') as fp2:
eq(fp1.read(), fp2.read())
# Make sure there are no more records left in rec1 and rec2,
# meaning they were the same length.
@@ -28,7 +28,7 @@ A blob implements the IBlob interface::
We can open a new blob file for reading, but it won't have any data::
>>> myblob.open("r").read()
>>> with myblob.open("r") as fp: fp.read()
''
But we can write data to a new Blob by opening it for writing::
@@ -105,7 +105,7 @@ to it via a name. If the first line in the following test kept a reference
around via a name, the second call to open it in a writable mode would fail
with a BlobError, but it doesn't::
>>> myblob.open("r+").read()
>>> with myblob.open("r+") as fp: fp.read()
'Hi, Blob!\nBlob is fine.'
>>> f4b = myblob.open("a")
>>> f4b.close()
@@ -194,5 +194,5 @@ If you have a small amount of data, you can pass it to the blob
constructor. (This is a convenience, mostly for writing tests.)
>>> myblob = Blob(b'some data')
>>> myblob.open().read()
>>> with myblob.open() as fp: fp.read()
'some data'
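
The doctest hunks rely on the one-line form, with blob.open() as fp: fp.read(), because the interactive interpreter (and therefore doctest) echoes the value of every expression statement, even one inside a compound statement, so the expected output lines stay unchanged. A sketch (hello.txt is an assumed fixture):

    >>> with open("hello.txt") as fp: fp.read()  # value is still echoed
    'hello world\n'
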
@@ -43,7 +43,7 @@ Getting stuff out of there works similarly:
>>> blob2 = root['myblob']
>>> IBlob.providedBy(blob2)
True
>>> blob2.open("r").read()
>>> with blob2.open("r") as fp: fp.read()
"I'm a happy Blob."
>>> transaction2.abort()
@@ -55,7 +55,7 @@ MVCC also works.
>>> _ = f.write(b'I am an ecstatic Blob.')
>>> f.close()
>>> transaction.commit()
>>> connection3.root()['myblob'].open('r').read()
>>> with connection3.root()['myblob'].open('r') as fp: fp.read()
"I'm a happy Blob."
>>> transaction2.abort()
@@ -84,7 +84,7 @@ back to try a copy/remove operation that is successful::
The blob did not have data before, so it shouldn't have data now::
>>> blob.open('r').read()
>>> with blob.open('r') as fp: fp.read()
'Some data.'
Case 2: We don't have uncommitted data and both the link operation and the
@@ -107,7 +107,7 @@ exist::
The blob did not have data before, so it shouldn't have data now::
>>> blob.open('r').read()
>>> with blob.open('r') as fp: fp.read()
''
Case 3: We have uncommitted data, but the link and the copy operations fail.
@@ -115,9 +115,8 @@ The exception will be re-raised and the target file will exist with the
previous uncommitted data::
>>> blob = Blob()
>>> blob_writing = blob.open('w')
>>> _ = blob_writing.write(b'Uncommitted data')
>>> blob_writing.close()
>>> with blob.open('w') as blob_writing:
... _ = blob_writing.write(b'Uncommitted data')
>>> blob.consumeFile('to_import')
Traceback (most recent call last):
@@ -126,7 +125,7 @@ previous uncommitted data::
The blob did exist before and had uncommitted data; this shouldn't have
changed::
>>> blob.open('r').read()
>>> with blob.open('r') as fp: fp.read()
'Uncommitted data'
>>> os.rename = os_rename
@@ -54,9 +54,13 @@ Make sure our data exists:
>>> items2 = root2['blobdata']
>>> bool(items1.keys() == items2.keys())
True
>>> items1['blob1'].open().read() == items2['blob1'].open().read()
>>> with items1['blob1'].open() as fp1:
... with items2['blob1'].open() as fp2:
... fp1.read() == fp2.read()
True
>>> items1['blob2'].open().read() == items2['blob2'].open().read()
>>> with items1['blob2'].open() as fp1:
... with items2['blob2'].open() as fp2:
... fp1.read() == fp2.read()
True
>>> transaction.get().abort()
@@ -27,7 +27,7 @@ Aborting a blob add leaves the blob unchanged:
>>> blob1._p_oid
>>> blob1._p_jar
>>> blob1.open().read()
>>> with blob1.open() as fp: fp.read()
'this is blob 1'
It doesn't clear the file because there is no previously committed version:
@@ -51,7 +51,7 @@ state:
>>> with blob1.open('w') as file:
... _ = file.write(b'this is new blob 1')
>>> blob1.open().read()
>>> with blob1.open() as fp: fp.read()
'this is new blob 1'
>>> fname = blob1._p_blob_uncommitted
>>> os.path.exists(fname)
@@ -62,7 +62,7 @@ state:
False
>>> blob1._p_blob_uncommitted
>>> blob1.open().read()
>>> with blob1.open() as fp: fp.read()
'this is blob 1'
Opening a blob gives us a filehandle. Getting data out of the
@@ -103,7 +103,7 @@ when we start)::
>>> bool(blob1a._p_changed)
False
>>> blob1a.open('r').read()
>>> with blob1a.open('r') as fp: fp.read()
'this is blob 1'
>>> blob1afh3 = blob1a.open('a')
>>> bool(blob1a._p_changed)
@@ -124,11 +124,11 @@ Since we committed the current transaction above, the aggregate
changes we've made to blob, blob1a (these refer to the same object) and
blob2 (a different object) should be evident::
>>> blob1.open('r').read()
>>> with blob1.open('r') as fp: fp.read()
'this is blob 1woot!'
>>> blob1a.open('r').read()
>>> with blob1a.open('r') as fp: fp.read()
'this is blob 1woot!'
>>> blob2.open('r').read()
>>> with blob2.open('r') as fp: fp.read()
'this is blob 3'
We shouldn't be able to persist a blob filehandle at commit time
......@@ -158,7 +158,7 @@ connections should result in a write conflict error::
>>> blob1c3fh1 = blob1c3.open('a').write(b'this is from connection 3')
>>> blob1c4fh1 = blob1c4.open('a').write(b'this is from connection 4')
>>> tm1.commit()
>>> root3['blob1'].open('r').read()
>>> with root3['blob1'].open('r') as fp: fp.read()
'this is blob 1woot!this is from connection 3'
>>> tm2.commit()
Traceback (most recent call last):
@@ -168,10 +168,10 @@
After the conflict, the winning transaction's result is visible on both
connections::
>>> root3['blob1'].open('r').read()
>>> with root3['blob1'].open('r') as fp: fp.read()
'this is blob 1woot!this is from connection 3'
>>> tm2.abort()
>>> root4['blob1'].open('r').read()
>>> with root4['blob1'].open('r') as fp: fp.read()
'this is blob 1woot!this is from connection 3'
You can't commit a transaction while blob files are open:
@@ -208,16 +208,16 @@ We do support optimistic savepoints:
>>> blob_fh.close()
>>> root5['blob'] = blob
>>> transaction.commit()
>>> root5['blob'].open("r").read()
>>> with root5['blob'].open("r") as fp: fp.read()
"I'm a happy blob."
>>> blob_fh = root5['blob'].open("a")
>>> _ = blob_fh.write(b" And I'm singing.")
>>> blob_fh.close()
>>> root5['blob'].open("r").read()
>>> with root5['blob'].open("r") as fp: fp.read()
"I'm a happy blob. And I'm singing."
>>> savepoint = transaction.savepoint(optimistic=True)
>>> root5['blob'].open("r").read()
>>> with root5['blob'].open("r") as fp: fp.read()
"I'm a happy blob. And I'm singing."
Savepoints store the blobs in temporary directories in the temporary
@@ -239,7 +239,7 @@ We support non-optimistic savepoints too:
>>> with root5['blob'].open("a") as file:
... _ = file.write(b" And I'm dancing.")
>>> root5['blob'].open("r").read()
>>> with root5['blob'].open("r") as fp: fp.read()
"I'm a happy blob. And I'm singing. And I'm dancing."
>>> savepoint = transaction.savepoint()
@@ -253,7 +253,7 @@ Again, the savepoint creates a new savepoints directory:
... _ = file.write(b" And the weather is beautiful.")
>>> savepoint.rollback()
>>> root5['blob'].open("r").read()
>>> with root5['blob'].open("r") as fp: fp.read()
"I'm a happy blob. And I'm singing. And I'm dancing."
>>> transaction.abort()
@@ -278,7 +278,7 @@ file that can be opened.
>>> blob_fh.close()
>>> root6['blob'] = blob
>>> transaction.commit()
>>> open(blob.committed()).read()
>>> with open(blob.committed()) as fp: fp.read()
"I'm a happy blob."
We can also read committed data by calling open with a 'c' flag:
@@ -294,7 +294,7 @@ and doesn't prevent us from opening the blob for writing:
>>> with blob.open('w') as file:
... _ = file.write(b'x')
>>> blob.open().read()
>>> with blob.open() as fp: fp.read()
'x'
>>> f.read()
@@ -342,7 +342,7 @@ uncommitted changes:
BlobError: Uncommitted changes
>>> transaction.commit()
>>> open(blob.committed()).read()
>>> with open(blob.committed()) as fp: fp.read()
"I'm a happy blob."
You can't open a committed blob file for writing:
@@ -395,7 +395,7 @@ And we shouldn't be able to read the data that we saved:
Of course the old data should be unaffected:
>>> open(blob_storage.loadBlob(blob._p_oid, oldserial)).read()
>>> with open(blob_storage.loadBlob(blob._p_oid, oldserial)) as fp: fp.read()
"I'm a happy blob."
Similarly, the new object wasn't added to the storage:
@@ -76,7 +76,8 @@ def main(args):
else:
s=ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)
data=open(data).read()
with open(data) as fp:
data = fp.read()
db=ZODB.DB(s,
# disable cache deactivation
cache_size=4000,
@@ -213,7 +213,7 @@ def open_convenience():
>>> db = ZODB.DB('data.fs', blob_dir='blobs')
>>> conn = db.open()
>>> conn.root()['b'].open().read()
>>> with conn.root()['b'].open() as fp: fp.read()
'test'
>>> db.close()
@@ -657,7 +657,7 @@ def pack_with_open_blob_files():
>>> tm1.commit()
>>> conn2.sync()
>>> conn2.root()[2].open().read()
>>> with conn2.root()[2].open() as fp: fp.read()
'some more data'
>>> db.close()
@@ -71,10 +71,9 @@ class RecoverTest(ZODB.tests.util.TestCase):
# Drop size null bytes into num random spots.
for i in range(num):
offset = random.randint(0, self.storage._pos - size)
f = open(self.path, "a+b")
with open(self.path, "a+b") as f:
f.seek(offset)
f.write(b"\0" * size)
f.close()
ITERATIONS = 5
@@ -106,13 +105,11 @@ class RecoverTest(ZODB.tests.util.TestCase):
self.assertTrue('\n0 bytes removed during recovery' in output, output)
# Verify that the recovered database is identical to the original.
before = open(self.path, 'rb')
with open(self.path, 'rb') as before:
before_guts = before.read()
before.close()
after = open(self.dest, 'rb')
with open(self.dest, 'rb') as after:
after_guts = after.read()
after.close()
self.assertEqual(before_guts, after_guts,
"recovery changed a non-damaged .fs file")
@@ -162,10 +159,9 @@ class RecoverTest(ZODB.tests.util.TestCase):
self.storage.close()
# Overwrite the entire header.
f = open(self.path, "a+b")
with open(self.path, "a+b") as f:
f.seek(pos1 - 50)
f.write(b"\0" * 100)
f.close()
output = self.recover()
self.assertTrue('error' in output, output)
self.recovered = FileStorage(self.dest)
@@ -174,10 +170,9 @@ class RecoverTest(ZODB.tests.util.TestCase):
os.rename(self.dest, self.path)
# Overwrite part of the header.
f = open(self.path, "a+b")
with open(self.path, "a+b") as f:
f.seek(pos2 + 10)
f.write(b"\0" * 100)
f.close()
output = self.recover()
self.assertTrue('error' in output, output)
self.recovered = FileStorage(self.dest)
@@ -194,13 +189,12 @@ class RecoverTest(ZODB.tests.util.TestCase):
pos = self.storage._txn_find(tid, 0)
# Overwrite its status with 'c'.
f = open(self.path, "r+b")
with open(self.path, "r+b") as f:
f.seek(pos + 16)
current_status = f.read(1)
self.assertEqual(current_status, b' ')
f.seek(pos + 16)
f.write(b'c')
f.close()
# Try to recover. The original bug was that this never completed --
# infinite loop in fsrecover.py. Also, in the ZODB 3.2 line,
@@ -182,7 +182,8 @@ class BlobUndoTests(BlobTestBase):
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), b'this is state 1')
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.close()
@@ -208,7 +209,8 @@ class BlobUndoTests(BlobTestBase):
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), b'this is state 1')
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.close()
@@ -233,12 +235,14 @@ class BlobUndoTests(BlobTestBase):
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), b'this is state 1')
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), b'this is state 2')
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 2')
database.close()
@@ -262,7 +266,8 @@ class BlobUndoTests(BlobTestBase):
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), b'this is state 1')
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.close()
@@ -351,7 +356,7 @@ def commit_from_wrong_partition():
>>> transaction.commit() # doctest: +ELLIPSIS
Copied blob file ...
>>> root['blob'].open().read()
>>> with root['blob'].open() as fp: fp.read()
'test'
Works with savepoints too:
@@ -365,7 +370,7 @@ Works with savepoints too:
>>> transaction.commit() # doctest: +ELLIPSIS
Copied blob file ...
>>> root['blob2'].open().read()
>>> with root['blob2'].open() as fp: fp.read()
'test2'
>>> os.rename = os_rename
@@ -565,7 +570,7 @@ def do_not_depend_on_cwd():
... _ = file.write(b'data')
>>> transaction.commit()
>>> os.chdir(here)
>>> conn.root()['blob'].open().read()
>>> with conn.root()['blob'].open() as fp: fp.read()
'data'
>>> bs.close()
@@ -587,14 +592,14 @@ def savepoint_isolation():
>>> with conn2.root.b.open('w') as file:
... _ = file.write(b'2')
>>> _ = tm.savepoint()
>>> conn.root.b.open().read()
>>> with conn.root.b.open() as fp: fp.read()
'1'
>>> conn2.root.b.open().read()
>>> with conn2.root.b.open() as fp: fp.read()
'2'
>>> transaction.abort()
>>> tm.commit()
>>> conn.sync()
>>> conn.root.b.open().read()
>>> with conn.root.b.open() as fp: fp.read()
'2'
>>> db.close()
"""
@@ -618,9 +623,9 @@ def savepoint_commits_without_invalidations_out_of_order():
>>> with conn2.root.b.open('w') as file:
... _ = file.write(b'2')
>>> _ = tm1.savepoint()
>>> conn1.root.b.open().read()
>>> with conn1.root.b.open() as fp: fp.read()
'1'
>>> conn2.root.b.open().read()
>>> with conn2.root.b.open() as fp: fp.read()
'2'
>>> tm2.commit()
>>> tm1.commit() # doctest: +IGNORE_EXCEPTION_DETAIL
@@ -668,7 +673,7 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
>>> old_serial = blob._p_serial
>>> blob._p_changed = True
>>> transaction.commit()
>>> blob.open().read()
>>> with blob.open() as fp: fp.read()
'blah'
>>> old_serial == blob._p_serial
True