Commit 548deaf2 authored by Stefan Behnel

remove usage of deprecated "U" file open mode flag which is enabled anyway

parent d6109ba4
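
For context (not part of the commit message): Python's text-mode open() applies universal newline translation by default, so an explicit "U"/"rU" mode flag is redundant, and Python 3.4+ emits a DeprecationWarning for it. A minimal sketch, using a made-up temporary file, of the default behaviour this commit relies on:

    import io
    import tempfile

    # Write a file with mixed newline conventions (invented content).
    with tempfile.NamedTemporaryFile("wb", suffix=".pyx", delete=False) as tmp:
        tmp.write(b"a\r\nb\rc\n")

    # Text mode already normalises "\r\n" and "\r" to "\n" (newline=None),
    # so adding "U" to the mode string changes nothing.
    with io.open(tmp.name, "r", encoding="UTF-8") as f:
        assert f.read() == "a\nb\nc\n"
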
@@ -399,7 +399,7 @@ def parse_dependencies(source_filename):
     # Actual parsing is way to slow, so we use regular expressions.
     # The only catch is that we must strip comments and string
     # literals ahead of time.
-    fh = Utils.open_source_file(source_filename, "rU", error_handling='ignore')
+    fh = Utils.open_source_file(source_filename, error_handling='ignore')
     try:
         source = fh.read()
     finally:
@@ -336,8 +336,7 @@ class Context(object):
         # Parse the given source file and return a parse tree.
         num_errors = Errors.num_errors
         try:
-            f = Utils.open_source_file(source_filename, "rU")
-            try:
+            with Utils.open_source_file(source_filename) as f:
                 from . import Parsing
                 s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
                                  scope = scope, context = self)
@@ -349,8 +348,6 @@ class Context(object):
                         raise RuntimeError(
                             "Formal grammer can only be used with compiled Cython with an available pgen.")
                     ConcreteSyntaxTree.p_module(source_filename)
-            finally:
-                f.close()
         except UnicodeDecodeError, e:
             #import traceback
             #traceback.print_exc()
@@ -360,11 +357,8 @@ class Context(object):
             position = e.args[2]
             encoding = e.args[0]
-            f = open(source_filename, "rb")
-            try:
+            with open(source_filename, "rb") as f:
                 byte_data = f.read()
-            finally:
-                f.close()
             # FIXME: make this at least a little less inefficient
             for idx, c in enumerate(byte_data):
@@ -1768,13 +1768,10 @@ def p_include_statement(s, ctx):
     include_file_path = s.context.find_include_file(include_file_name, pos)
     if include_file_path:
         s.included_files.append(include_file_name)
-        f = Utils.open_source_file(include_file_path, mode="rU")
-        source_desc = FileSourceDescriptor(include_file_path)
-        s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments)
-        try:
+        with Utils.open_source_file(include_file_path) as f:
+            source_desc = FileSourceDescriptor(include_file_path)
+            s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments)
             tree = p_statement_list(s2, ctx)
-        finally:
-            f.close()
         return tree
     else:
         return None
@@ -199,15 +199,10 @@ class FileSourceDescriptor(SourceDescriptor):
                 return lines
         except KeyError:
             pass
-        f = Utils.open_source_file(
-            self.filename, encoding=encoding,
-            error_handling=error_handling,
-            # newline normalisation is costly before Py2.6
-            require_normalised_newlines=False)
-        try:
+        with Utils.open_source_file(self.filename, encoding=encoding, error_handling=error_handling) as f:
             lines = list(f)
-        finally:
-            f.close()
         if key in self._lines:
             self._lines[key] = lines
         else:
@@ -244,16 +244,13 @@ def skip_bom(f):
 def open_source_file(source_filename, mode="r",
-                     encoding=None, error_handling=None,
-                     require_normalised_newlines=True):
+                     encoding=None, error_handling=None):
     if encoding is None:
         # Most of the time the coding is unspecified, so be optimistic that
         # it's UTF-8.
         f = open_source_file(source_filename, encoding="UTF-8", mode=mode, error_handling='ignore')
         encoding = detect_opened_file_encoding(f)
-        if (encoding == "UTF-8"
-                and error_handling == 'ignore'
-                and require_normalised_newlines):
+        if encoding == "UTF-8" and error_handling == 'ignore':
             f.seek(0)
             skip_bom(f)
             return f
@@ -266,8 +263,7 @@ def open_source_file(source_filename, mode="r",
         if source_filename.startswith(loader.archive):
             return open_source_from_loader(
                 loader, source_filename,
-                encoding, error_handling,
-                require_normalised_newlines)
+                encoding, error_handling)
     except (NameError, AttributeError):
         pass
@@ -279,8 +275,7 @@ def open_source_file(source_filename, mode="r",
 def open_source_from_loader(loader,
                             source_filename,
-                            encoding=None, error_handling=None,
-                            require_normalised_newlines=True):
+                            encoding=None, error_handling=None):
     nrmpath = os.path.normpath(source_filename)
     arcname = nrmpath[len(loader.archive)+1:]
     data = loader.get_data(arcname)
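
Beyond dropping the "U" flag, each hunk above also replaces a hand-written try/finally around f.close() with a with-statement, which closes the file even when the body raises. A generic sketch of that equivalence (the file name is invented, not taken from the Cython sources):

    # Before: explicit cleanup in a finally block.
    f = open("example.pyx", encoding="UTF-8")
    try:
        source = f.read()
    finally:
        f.close()

    # After: the file object is its own context manager, so __exit__()
    # closes it whether or not read() raises.
    with open("example.pyx", encoding="UTF-8") as f:
        source = f.read()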