Commit 0edf4568 authored by Robert Bradshaw

Merge branch 'master' into 0.20.x

Conflicts:
	CHANGES.rst
parents 5e08f83c e40d032f
......@@ -6,7 +6,6 @@ Cython Changelog
Latest
=======
Features added
--------------
......@@ -16,6 +15,12 @@ Bugs fixed
* List/Tuple literals multiplied by more than one factor were only multiplied
by the last factor instead of all.
* Lookups of special methods (specifically for context managers) could fail
in Python <= 2.6/3.1.
* Local variables were erroneously appended to the signature introspection
of Cython implemented functions with keyword-only arguments under Python 3.
* In-place assignments to variables with inferred Python builtin/extension
types could fail with type errors if the result value type was incompatible
with the type of the previous value.
......
......@@ -91,7 +91,14 @@ def file_hash(filename):
path = os.path.normpath(filename.encode("UTF-8"))
m = hashlib.md5(str(len(path)) + ":")
m.update(path)
m.update(open(filename).read())
f = open(filename, 'rb')
try:
data = f.read(65000)
while data:
m.update(data)
data = f.read(65000)
finally:
f.close()
return m.hexdigest()
def parse_list(s):
......@@ -671,7 +678,7 @@ def cythonize(module_list, exclude=[], nthreads=0, aliases=None, quiet=False, fo
options['include_path'] = ['.']
if 'common_utility_include_dir' in options:
if options.get('cache'):
raise NotImplementedError, "common_utility_include_dir does not yet work with caching"
raise NotImplementedError("common_utility_include_dir does not yet work with caching")
if not os.path.exists(options['common_utility_include_dir']):
os.makedirs(options['common_utility_include_dir'])
c_options = CompilationOptions(**options)
......
......@@ -267,12 +267,12 @@ except ImportError:
for name, value in kwd_values.items():
if name in args:
if name in all:
raise TypeError, "Duplicate argument %s" % name
raise TypeError("Duplicate argument %s" % name)
all[name] = kwd_values.pop(name)
if kwds is not None:
all[kwds] = kwd_values
elif kwd_values:
raise TypeError, "Unexpected keyword arguments: %s" % kwd_values.keys()
raise TypeError("Unexpected keyword arguments: %s" % kwd_values.keys())
if defaults is None:
defaults = ()
first_default = len(args) - len(defaults)
......@@ -281,7 +281,7 @@ except ImportError:
if ix >= first_default:
all[name] = defaults[ix - first_default]
else:
raise TypeError, "Missing argument: %s" % name
raise TypeError("Missing argument: %s" % name)
return all
def get_body(source):
......
......@@ -17,7 +17,6 @@ special_chars = [
(u'>', u'\xF1', u'&gt;'),
]
line_pos_comment = re.compile(r'/\*.*?<<<<<<<<<<<<<<.*?\*/\n*', re.DOTALL)
class AnnotationCCodeWriter(CCodeWriter):
......@@ -141,6 +140,7 @@ function toggleDiv(id) {
return ur"<span class='%s'>%s</span>" % (
group_name, match.group(group_name))
pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n'
k = 0
code_source_file = self.code.get(source_filename, {})
for line in lines:
......@@ -150,6 +150,9 @@ function toggleDiv(id) {
except KeyError:
code = ''
else:
code = _replace_pos_comment(pos_comment_marker, code)
if code.startswith(pos_comment_marker):
code = code[len(pos_comment_marker):]
code = html_escape(code)
calls = zero_calls.copy()
......@@ -165,7 +168,6 @@ function toggleDiv(id) {
f.write(line.rstrip())
f.write(u'</pre>\n')
code = re.sub(line_pos_comment, '', code) # inline annotations are redundant
f.write(u"<pre id='line%s' class='code' style='background-color: #%s'>%s</pre>" % (k, color, code))
f.write(u'</body></html>\n')
f.close()
......@@ -183,6 +185,13 @@ _parse_code = re.compile(
).sub
_replace_pos_comment = re.compile(
# this matches what Cython generates as code line marker comment
ur'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n',
re.M
).sub
class AnnotationItem(object):
def __init__(self, style, text, tag="", size=0):
......
......@@ -249,7 +249,13 @@ class UtilityCodeBase(object):
continue
# only pass lists when we have to: most argument expect one value or None
if name == 'requires':
values = [ cls.load(dep, from_file, **orig_kwargs) for dep in values ]
if orig_kwargs:
values = [cls.load(dep, from_file, **orig_kwargs)
for dep in sorted(values)]
else:
# dependencies are rarely unique, so use load_cached() when we can
values = [cls.load_cached(dep, from_file)
for dep in sorted(values)]
elif not values:
values = None
elif len(values) == 1:
......@@ -269,16 +275,16 @@ class UtilityCodeBase(object):
return cls(**kwargs)
@classmethod
def load_cached(cls, utility_code_name, from_file=None, _cache={}):
def load_cached(cls, utility_code_name, from_file=None, __cache={}):
"""
Calls .load(), but using a per-type cache based on utility name and file name.
"""
key = (cls, from_file, utility_code_name)
try:
return _cache[key]
return __cache[key]
except KeyError:
pass
code = _cache[key] = cls.load(utility_code_name, from_file)
code = __cache[key] = cls.load(utility_code_name, from_file)
return code
@classmethod
......
......@@ -159,7 +159,7 @@ def report_error(err):
echo_file.write(line.encode('ASCII', 'replace'))
num_errors = num_errors + 1
if Options.fast_fail:
raise AbortError, "fatal errors"
raise AbortError("fatal errors")
def error(position, message):
#print "Errors.error:", repr(position), repr(message) ###
......
......@@ -1690,7 +1690,7 @@ class NameNode(AtomicExprNode):
return self
def analyse_target_types(self, env):
self.analyse_entry(env)
self.analyse_entry(env, is_target=True)
if (not self.is_lvalue() and self.entry.is_cfunction and
self.entry.fused_cfunction and self.entry.as_variable):
......@@ -1750,12 +1750,12 @@ class NameNode(AtomicExprNode):
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env):
def analyse_entry(self, env, is_target=False):
#print "NameNode.analyse_entry:", self.name ###
self.check_identifier_kind()
entry = self.entry
type = entry.type
if (type.is_pyobject and self.inferred_type and
if (not is_target and type.is_pyobject and self.inferred_type and
self.inferred_type.is_builtin_type):
# assume that type inference is smarter than the static entry
type = self.inferred_type
......@@ -2536,7 +2536,9 @@ class WithExitCallNode(ExprNode):
result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.mark_pos(self.pos)
code.putln("%s = PyObject_Call(%s, %s, NULL);" % (
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
result_var,
self.with_stat.exit_var,
self.args.result()))
......@@ -4657,8 +4659,10 @@ class SimpleCallNode(CallNode):
code.globalstate.use_utility_code(self.function.entry.utility_code)
if func_type.is_pyobject:
arg_code = self.arg_tuple.py_result()
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = PyObject_Call(%s, %s, NULL); %s" % (
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
self.function.py_result(),
arg_code,
......@@ -5087,8 +5091,10 @@ class GeneralCallNode(CallNode):
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = PyObject_Call(%s, %s, %s); %s" % (
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
......@@ -7604,18 +7610,14 @@ class CodeObjectNode(ExprNode):
def __init__(self, def_node):
ExprNode.__init__(self, def_node.pos, def_node=def_node)
args = list(def_node.args)
if def_node.star_arg:
args.append(def_node.star_arg)
if def_node.starstar_arg:
args.append(def_node.starstar_arg)
local_vars = [ arg for arg in def_node.local_scope.var_entries
if arg.name ]
# if we have args/kwargs, then the first two in var_entries are those
local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
self.varnames = TupleNode(
def_node.pos,
args = [ IdentifierStringNode(arg.pos, value=arg.name)
for arg in args + local_vars ],
is_temp = 0,
is_literal = 1)
args=[IdentifierStringNode(arg.pos, value=arg.name)
for arg in args + local_vars],
is_temp=0,
is_literal=1)
def may_be_none(self):
return False
......@@ -7635,11 +7637,18 @@ class CodeObjectNode(ExprNode):
file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8'))
file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, 0, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
flags = []
if self.def_node.star_arg:
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
flags.append('CO_VARKEYWORDS')
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
self.result_code,
len(func.args), # argcount
len(func.args) - func.num_kwonly_args, # argcount
func.num_kwonly_args, # kwonlyargcount (Py3 only)
len(self.varnames.args), # nlocals
'|'.join(flags) or '0', # flags
Naming.empty_bytes, # code
Naming.empty_tuple, # consts
Naming.empty_tuple, # names (FIXME)
......@@ -7950,8 +7959,8 @@ class LocalsDictItemNode(DictItemNode):
class FuncLocalsExprNode(DictNode):
def __init__(self, pos, env):
local_vars = [entry.name for entry in env.entries.values()
if entry.name]
local_vars = sorted([
entry.name for entry in env.entries.values() if entry.name])
items = [LocalsDictItemNode(
pos, key=IdentifierStringNode(pos, value=var),
value=NameNode(pos, name=var, allow_null=True))
......@@ -8373,6 +8382,9 @@ class TypecastNode(ExprNode):
"Cannot cast to a function type")
self.type = PyrexTypes.error_type
self.operand = self.operand.analyse_types(env)
if self.type is PyrexTypes.c_bint_type:
# short circuit this to a coercion
return self.operand.coerce_to_boolean(env)
to_py = self.type.is_pyobject
from_py = self.operand.type.is_pyobject
if from_py and not to_py and self.operand.is_ephemeral():
......@@ -8380,10 +8392,7 @@ class TypecastNode(ExprNode):
error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
if to_py and not from_py:
if self.type is bytes_type and self.operand.type.is_int:
# FIXME: the type cast node isn't needed in this case
# and can be dropped once analyse_types() can return a
# different node
self.operand = CoerceIntToBytesNode(self.operand, env)
return CoerceIntToBytesNode(self.operand, env)
elif self.operand.type.can_coerce_to_pyobject(env):
self.result_ctype = py_object_type
base_type = self.base_type.analyse(env)
......@@ -8405,7 +8414,7 @@ class TypecastNode(ExprNode):
else:
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
elif from_py and to_py:
if self.typecheck and self.type.is_pyobject:
if self.typecheck:
self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
elif isinstance(self.operand, SliceIndexNode):
# This cast can influence the created type of string slices.
......@@ -9214,9 +9223,9 @@ class AddNode(NumBinopNode):
if type1 is unicode_type or type2 is unicode_type:
if type1.is_builtin_type and type2.is_builtin_type:
if self.operand1.may_be_none() or self.operand2.may_be_none():
return '__Pyx_PyUnicode_Concat'
return '__Pyx_PyUnicode_ConcatSafe'
else:
return 'PyUnicode_Concat'
return '__Pyx_PyUnicode_Concat'
return super(AddNode, self).py_operation_function()
......
......@@ -193,9 +193,11 @@ class ControlFlow(object):
def mark_reference(self, node, entry):
if self.block and self.is_tracked(entry):
self.block.stats.append(NameReference(node, entry))
# Local variable is definitely bound after this reference
if not node.allow_null:
self.block.bounded.add(entry)
## XXX: We don't track expression evaluation order so we can't use
## XXX: successful reference as initialization sign.
## # Local variable is definitely bound after this reference
## if not node.allow_null:
## self.block.bounded.add(entry)
self.entries.add(entry)
def normalize(self):
......@@ -548,9 +550,9 @@ def check_definitions(flow, compiler_directives):
references[stat.node] = stat.entry
stat.entry.cf_references.append(stat)
stat.node.cf_state.update(state)
if not stat.node.allow_null:
i_state &= ~i_assmts.bit
# after successful read, the state is known to be initialised
## if not stat.node.allow_null:
## i_state &= ~i_assmts.bit
## # after successful read, the state is known to be initialised
state.discard(Uninitialized)
state.discard(Unknown)
for assmt in state:
......@@ -798,7 +800,7 @@ class ControlFlowAnalysis(CythonTransform):
return node
def visit_AssignmentNode(self, node):
raise InternalError, "Unhandled assignment node"
raise InternalError("Unhandled assignment node")
def visit_SingleAssignmentNode(self, node):
self._visit(node.rhs)
......@@ -1097,7 +1099,7 @@ class ControlFlowAnalysis(CythonTransform):
return node
def visit_LoopNode(self, node):
raise InternalError, "Generic loops are not supported"
raise InternalError("Generic loops are not supported")
def visit_WithTargetAssignmentStatNode(self, node):
self.mark_assignment(node.lhs, node.rhs)
......@@ -1121,6 +1123,7 @@ class ControlFlowAnalysis(CythonTransform):
## XXX: links to exception handling point should be added by
## XXX: children nodes
self.flow.block.add_child(entry_point)
self.flow.nextblock()
self._visit(node.body)
self.flow.exceptions.pop()
......@@ -1181,6 +1184,7 @@ class ControlFlowAnalysis(CythonTransform):
self.flow.block = body_block
## XXX: Is it still required
body_block.add_child(entry_point)
self.flow.nextblock()
self._visit(node.body)
self.flow.exceptions.pop()
if self.flow.loops:
......
......@@ -309,7 +309,14 @@ class Context(object):
position = e.args[2]
encoding = e.args[0]
for idx, c in enumerate(open(source_filename, "rb").read()):
f = open(source_filename, "rb")
try:
byte_data = f.read()
finally:
f.close()
# FIXME: make this at least a little less inefficient
for idx, c in enumerate(byte_data):
if c in (ord('\n'), '\n'):
line += 1
column = 0
......
......@@ -346,15 +346,44 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
globalstate.finalize_main_c_code()
f = open_new_file(result.c_file)
try:
rootwriter.copyto(f)
if options.gdb_debug:
self._serialize_lineno_map(env, rootwriter)
finally:
f.close()
result.c_file_generated = 1
if options.gdb_debug:
self._serialize_lineno_map(env, rootwriter)
if Options.annotate or options.annotate:
self._generate_annotations(rootwriter, result)
def _generate_annotations(self, rootwriter, result):
self.annotate(rootwriter)
rootwriter.save_annotation(result.main_source_file, result.c_file)
# if we included files, additionally generate one annotation file for each
if not self.scope.included_files:
return
search_include_file = self.scope.context.search_include_directories
target_dir = os.path.abspath(os.path.dirname(result.c_file))
for included_file in self.scope.included_files:
target_file = os.path.abspath(os.path.join(target_dir, included_file))
target_file_dir = os.path.dirname(target_file)
if not target_file_dir.startswith(target_dir):
# any other directories may not be writable => avoid trying
continue
source_file = search_include_file(included_file, "", self.pos, include=True)
if not source_file:
continue
if target_file_dir != target_dir and not os.path.exists(target_file_dir):
try:
os.makedirs(target_file_dir)
except OSError, e:
import errno
if e.errno != errno.EEXIST:
raise
rootwriter.save_annotation(source_file, target_file)
def _serialize_lineno_map(self, env, ccodewriter):
tb = env.context.gdb_debug_outputwriter
markers = ccodewriter.buffer.allmarkers()
......@@ -382,13 +411,12 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.find_referenced_modules(imported_module, module_list, modules_seen)
module_list.append(env)
def sort_types_by_inheritance(self, type_dict, getkey):
def sort_types_by_inheritance(self, type_dict, type_order, getkey):
# copy the types into a list moving each parent type before
# its first child
type_items = type_dict.items()
type_list = []
for i, item in enumerate(type_items):
key, new_entry = item
for i, key in enumerate(type_order):
new_entry = type_dict[key]
# collect all base classes to check for children
hierarchy = set()
......@@ -413,43 +441,59 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
return type_list
def sort_type_hierarchy(self, module_list, env):
vtab_dict = {}
vtabslot_dict = {}
# poor developer's OrderedDict
vtab_dict, vtab_dict_order = {}, []
vtabslot_dict, vtabslot_dict_order = {}, []
for module in module_list:
for entry in module.c_class_entries:
if not entry.in_cinclude:
if entry.used and not entry.in_cinclude:
type = entry.type
if type.vtabstruct_cname:
vtab_dict[type.vtabstruct_cname] = entry
key = type.vtabstruct_cname
if not key:
continue
if key in vtab_dict:
# FIXME: this should *never* happen, but apparently it does
# for Cython generated utility code
from Cython.Compiler.UtilityCode import NonManglingModuleScope
assert isinstance(entry.scope, NonManglingModuleScope), str(entry.scope)
assert isinstance(vtab_dict[key].scope, NonManglingModuleScope), str(vtab_dict[key].scope)
else:
vtab_dict[key] = entry
vtab_dict_order.append(key)
all_defined_here = module is env
for entry in module.type_entries:
if all_defined_here or entry.defined_in_pxd:
if entry.used and (all_defined_here or entry.defined_in_pxd):
type = entry.type
if type.is_extension_type and not entry.in_cinclude:
type = entry.type
vtabslot_dict[type.objstruct_cname] = entry
key = type.objstruct_cname
assert key not in vtabslot_dict, key
vtabslot_dict[key] = entry
vtabslot_dict_order.append(key)
def vtabstruct_cname(entry_type):
return entry_type.vtabstruct_cname
vtab_list = self.sort_types_by_inheritance(
vtab_dict, vtabstruct_cname)
vtab_dict, vtab_dict_order, vtabstruct_cname)
def objstruct_cname(entry_type):
return entry_type.objstruct_cname
vtabslot_list = self.sort_types_by_inheritance(
vtabslot_dict, objstruct_cname)
vtabslot_dict, vtabslot_dict_order, objstruct_cname)
return (vtab_list, vtabslot_list)
def sort_cdef_classes(self, env):
key_func = operator.attrgetter('objstruct_cname')
entry_dict = {}
entry_dict, entry_order = {}, []
for entry in env.c_class_entries:
key = key_func(entry.type)
assert key not in entry_dict
assert key not in entry_dict, key
entry_dict[key] = entry
entry_order.append(key)
env.c_class_entries[:] = self.sort_types_by_inheritance(
entry_dict, key_func)
entry_dict, entry_order, key_func)
def generate_type_definitions(self, env, modules, vtab_list, vtabslot_list, code):
# TODO: Why are these separated out?
......@@ -1892,7 +1936,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
env.use_utility_code(streq_utility_code)
code.putln()
code.putln("static char* %s_type_names[] = {" % Naming.import_star)
for name, entry in env.entries.items():
for name, entry in sorted(env.entries.items()):
if entry.is_type:
code.putln('"%s",' % name)
code.putln("0")
......
......@@ -1554,6 +1554,9 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
"""Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
"""
if len(args) <= 1:
if len(args) == 1 and args[0].is_sequence_constructor:
args = args[0].args
else:
# leave this to Python
return node
......@@ -2187,6 +2190,7 @@ class OptimizeBuiltinCalls(Visitor.MethodDispatcherTransform):
temp = None
if isinstance(types, ExprNodes.TupleNode):
types = types.args
if arg.is_attribute or not arg.is_simple():
arg = temp = UtilNodes.ResultRefNode(arg)
elif types.type is Builtin.type_type:
types = [types]
......
......@@ -1970,7 +1970,7 @@ class ExpandInplaceOperators(EnvTransform):
return node, [node]
elif isinstance(node, ExprNodes.IndexNode):
if node.is_buffer_access:
raise ValueError, "Buffer access"
raise ValueError("Buffer access")
base, temps = side_effect_free_reference(node.base)
index = LetRefNode(node.index)
return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
......@@ -2304,7 +2304,7 @@ class CreateClosureClasses(CythonTransform):
if not from_closure and (self.path or inner_node):
if not inner_node:
if not node.py_cfunc_node:
raise InternalError, "DefNode does not have assignment node"
raise InternalError("DefNode does not have assignment node")
inner_node = node.py_cfunc_node
inner_node.needs_self_code = False
node.needs_outer_scope = False
......
......@@ -19,7 +19,7 @@ def dumptree(t):
def abort_on_errors(node):
# Stop the pipeline if there are any errors.
if Errors.num_errors != 0:
raise AbortError, "pipeline break"
raise AbortError("pipeline break")
return node
def parse_stage_factory(context):
......
......@@ -1542,7 +1542,7 @@ class LocalScope(Scope):
if entry is not None:
if entry.scope is not self and entry.scope.is_closure_scope:
if hasattr(entry.scope, "scope_class"):
raise InternalError, "lookup() after scope class created."
raise InternalError("lookup() after scope class created.")
# The actual c fragment for the different scopes differs
# on the outside and inside, so we make a new entry
entry.in_closure = True
......
......@@ -271,13 +271,15 @@ class TestDebugTransform(DebuggerTestCase):
assert 'puts' in spam_stepinto
assert 'some_c_function' in spam_stepinto
except:
print open(self.debug_dest).read()
f = open(self.debug_dest)
try:
print(f.read())
finally:
f.close()
raise
if __name__ == "__main__":
import unittest
unittest.main()
......@@ -383,6 +383,7 @@ class SimpleAssignmentTypeInferer(object):
if not types:
node_type = py_object_type
else:
entry = node.entry
node_type = spanning_type(
types, entry.might_overflow, entry.pos)
node.inferred_type = node_type
......@@ -392,6 +393,7 @@ class SimpleAssignmentTypeInferer(object):
if assmt.inferred_type is not None]
if not types:
return
entry = node.entry
return spanning_type(types, entry.might_overflow, entry.pos)
def resolve_assignments(assignments):
......@@ -404,10 +406,9 @@ class SimpleAssignmentTypeInferer(object):
infer_name_node_type(node)
# Resolve assmt
inferred_type = assmt.infer_type()
done = False
assmts_resolved.add(assmt)
resolved.add(assmt)
assignments -= resolved
assignments.difference_update(resolved)
return resolved
def partial_infer(assmt):
......@@ -427,10 +428,8 @@ class SimpleAssignmentTypeInferer(object):
# try to handle circular references
partials = set()
for assmt in assignments:
partial_types = []
if assmt in partial_assmts:
continue
for node in assmt_to_names[assmt]:
if partial_infer(assmt):
partials.add(assmt)
assmts_resolved.add(assmt)
......@@ -542,7 +541,7 @@ def safe_spanning_type(types, might_overflow, pos):
return result_type
# TODO: double complex should be OK as well, but we need
# to make sure everything is supported.
elif result_type.is_int and not might_overflow:
elif (result_type.is_int or result_type.is_enum) and not might_overflow:
return result_type
return py_object_type
......
......@@ -35,6 +35,7 @@ def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
fd, tempfilename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
try:
f.write(prefix_code)
f.write('set breakpoint pending on\n')
f.write("set print pretty on\n")
......@@ -46,7 +47,11 @@ def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
pass
else:
path = os.path.join(path_to_debug_info, "cython_debug", "interpreter")
interpreter = open(path).read()
interpreter_file = open(path)
try:
interpreter = interpreter_file.read()
finally:
interpreter_file.close()
f.write("file %s\n" % interpreter)
f.write('\n'.join('cy import %s\n' % fn for fn in debug_files))
f.write(textwrap.dedent('''\
......@@ -62,7 +67,7 @@ def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
source .cygdbinit
'''))
finally:
f.close()
return tempfilename
......
......@@ -35,7 +35,9 @@ build_ext = sys.modules['Cython.Distutils.build_ext']
have_gdb = None
def test_gdb():
global have_gdb
if have_gdb is None:
if have_gdb is not None:
return have_gdb
try:
p = subprocess.Popen(['gdb', '-v'], stdout=subprocess.PIPE)
have_gdb = True
......@@ -43,7 +45,7 @@ def test_gdb():
# gdb was not installed
have_gdb = False
else:
gdb_version = p.stdout.read().decode('ascii')
gdb_version = p.stdout.read().decode('ascii', 'ignore')
p.wait()
p.stdout.close()
......@@ -54,17 +56,23 @@ def test_gdb():
if gdb_version_number >= [7, 2]:
python_version_script = tempfile.NamedTemporaryFile(mode='w+')
try:
python_version_script.write(
'python import sys; print("%s %s" % sys.version_info[:2])')
python_version_script.flush()
p = subprocess.Popen(['gdb', '-batch', '-x', python_version_script.name],
stdout=subprocess.PIPE)
try:
python_version = p.stdout.read().decode('ascii')
p.wait()
finally:
p.stdout.close()
try:
python_version_number = list(map(int, python_version.split()))
except ValueError:
have_gdb = False
finally:
python_version_script.close()
# Be Python 3 compatible
if (not have_gdb
......@@ -146,6 +154,7 @@ class DebuggerTestCase(unittest.TestCase):
finally:
optimization_disabler.restore_state()
sys.stderr = stderr
new_stderr.close()
# ext = Cython.Distutils.extension.Extension(
# 'codefile',
......@@ -227,45 +236,6 @@ class GdbDebuggerTestCase(DebuggerTestCase):
os.path.abspath(Cython.__file__))))
env = dict(os.environ, PYTHONPATH=os.pathsep.join(paths))
try:
p = subprocess.Popen(['gdb', '-v'], stdout=subprocess.PIPE)
have_gdb = True
except OSError:
# gdb was not installed
have_gdb = False
else:
gdb_version = p.stdout.read().decode('ascii')
p.wait()
p.stdout.close()
if have_gdb:
# Based on Lib/test/test_gdb.py
regex = "^GNU gdb [^\d]*(\d+)\.(\d+)"
gdb_version_number = list(map(int, re.search(regex, gdb_version).groups()))
if gdb_version_number >= [7, 2]:
python_version_script = tempfile.NamedTemporaryFile(mode='w+')
python_version_script.write(
'python import sys; print("%s %s" % sys.version_info[:2])')
python_version_script.flush()
p = subprocess.Popen(['gdb', '-batch', '-x', python_version_script.name],
stdout=subprocess.PIPE)
python_version = p.stdout.read().decode('ascii')
p.wait()
try:
python_version_number = list(map(int, python_version.split()))
except ValueError:
have_gdb = False
# Be Python 3 compatible
if (not have_gdb
or gdb_version_number < [7, 2]
or python_version_number < [2, 6]):
self.p = None
warnings.warn(
'Skipping gdb tests, need gdb >= 7.2 with Python >= 2.6')
else:
self.p = subprocess.Popen(
args,
stdout=open(os.devnull, 'w'),
......@@ -276,10 +246,15 @@ class GdbDebuggerTestCase(DebuggerTestCase):
if not test_gdb():
return
try:
super(GdbDebuggerTestCase, self).tearDown()
if self.p:
self.p.stderr.close()
try: self.p.stdout.close()
except: pass
try: self.p.stderr.close()
except: pass
self.p.wait()
finally:
os.remove(self.gdb_command_file)
......@@ -292,15 +267,15 @@ class TestAll(GdbDebuggerTestCase):
out, err = self.p.communicate()
err = err.decode('UTF-8')
exit_status = self.p.wait()
exit_status = self.p.returncode
if exit_status == 1:
sys.stderr.write(err)
elif exit_status >= 2:
border = '*' * 30
start = '%s v INSIDE GDB v %s' % (border, border)
end = '%s ^ INSIDE GDB ^ %s' % (border, border)
errmsg = '\n%s\n%s%s' % (start, err, end)
border = u'*' * 30
start = u'%s v INSIDE GDB v %s' % (border, border)
end = u'%s ^ INSIDE GDB ^ %s' % (border, border)
errmsg = u'\n%s\n%s%s' % (start, err, end)
sys.stderr.write(errmsg)
......
......@@ -17,7 +17,7 @@
#
# Author: Lars Buitinck
cdef extern from "numpy/npy_math.h":
cdef extern from "numpy/npy_math.h" nogil:
# Floating-point classification
long double NAN "NPY_NAN"
long double INFINITY "NPY_INFINITY"
......@@ -30,8 +30,6 @@ cdef extern from "numpy/npy_math.h":
bint isnan "npy_isnan"(long double)
bint signbit "npy_signbit"(long double)
double copysign "npy_copysign"(double, double)
# Math constants
long double E "NPY_E"
long double LOG2E "NPY_LOG2E" # ln(e) / ln(2)
......@@ -46,5 +44,90 @@ cdef extern from "numpy/npy_math.h":
long double EULER "NPY_EULER" # Euler constant (gamma, 0.57721)
# Low-level floating point manipulation (NumPy >=1.4)
float copysignf "npy_copysignf"(float, float)
float nextafterf "npy_nextafterf"(float x, float y)
float spacingf "npy_spacingf"(float x)
double copysign "npy_copysign"(double, double)
double nextafter "npy_nextafter"(double x, double y)
double spacing "npy_spacing"(double x)
long double copysignl "npy_copysignl"(long double, long double)
long double nextafterl "npy_nextafterl"(long double x, long double y)
long double spacingl "npy_spacingl"(long double x)
# Float C99 functions
float sinf "npy_sinf"(float x)
float cosf "npy_cosf"(float x)
float tanf "npy_tanf"(float x)
float sinhf "npy_sinhf"(float x)
float coshf "npy_coshf"(float x)
float tanhf "npy_tanhf"(float x)
float fabsf "npy_fabsf"(float x)
float floorf "npy_floorf"(float x)
float ceilf "npy_ceilf"(float x)
float rintf "npy_rintf"(float x)
float sqrtf "npy_sqrtf"(float x)
float log10f "npy_log10f"(float x)
float logf "npy_logf"(float x)
float expf "npy_expf"(float x)
float expm1f "npy_expm1f"(float x)
float asinf "npy_asinf"(float x)
float acosf "npy_acosf"(float x)
float atanf "npy_atanf"(float x)
float asinhf "npy_asinhf"(float x)
float acoshf "npy_acoshf"(float x)
float atanhf "npy_atanhf"(float x)
float log1pf "npy_log1pf"(float x)
float exp2f "npy_exp2f"(float x)
float log2f "npy_log2f"(float x)
float atan2f "npy_atan2f"(float x)
float hypotf "npy_hypotf"(float x)
float powf "npy_powf"(float x)
float fmodf "npy_fmodf"(float x)
float modff "npy_modff"(float x)
# Long double C99 functions
long double sinl "npy_sinl"(long double x)
long double cosl "npy_cosl"(long double x)
long double tanl "npy_tanl"(long double x)
long double sinhl "npy_sinhl"(long double x)
long double coshl "npy_coshl"(long double x)
long double tanhl "npy_tanhl"(long double x)
long double fabsl "npy_fabsl"(long double x)
long double floorl "npy_floorl"(long double x)
long double ceill "npy_ceill"(long double x)
long double rintl "npy_rintl"(long double x)
long double sqrtl "npy_sqrtl"(long double x)
long double log10l "npy_log10l"(long double x)
long double logl "npy_logl"(long double x)
long double expl "npy_expl"(long double x)
long double expm1l "npy_expm1l"(long double x)
long double asinl "npy_asinl"(long double x)
long double acosl "npy_acosl"(long double x)
long double atanl "npy_atanl"(long double x)
long double asinhl "npy_asinhl"(long double x)
long double acoshl "npy_acoshl"(long double x)
long double atanhl "npy_atanhl"(long double x)
long double log1pl "npy_log1pl"(long double x)
long double exp2l "npy_exp2l"(long double x)
long double log2l "npy_log2l"(long double x)
long double atan2l "npy_atan2l"(long double x)
long double hypotl "npy_hypotl"(long double x)
long double powl "npy_powl"(long double x)
long double fmodl "npy_fmodl"(long double x)
long double modfl "npy_modfl"(long double x)
# NumPy extensions
float deg2radf "npy_deg2radf"(float x)
float rad2degf "npy_rad2degf"(float x)
float logaddexpf "npy_logaddexpf"(float x)
float logaddexp2f "npy_logaddexp2f"(float x)
double deg2rad "npy_deg2rad"(double x)
double rad2deg "npy_rad2deg"(double x)
double logaddexp "npy_logaddexp"(double x)
double logaddexp2 "npy_logaddexp2"(double x)
long double deg2radl "npy_deg2radl"(long double x)
long double rad2degl "npy_rad2degl"(long double x)
long double logaddexpl "npy_logaddexpl"(long double x)
long double logaddexp2l "npy_logaddexp2l"(long double x)
# cython.* namespace for pure mode.
__version__ = "0.20"
__version__ = "0.20.post0"
# BEGIN shameless copy from Cython/minivect/minitypes.py
......
......@@ -8,13 +8,15 @@ import unittest
import os, sys
import tempfile
class NodeTypeWriter(TreeVisitor):
def __init__(self):
super(NodeTypeWriter, self).__init__()
self._indents = 0
self.result = []
def visit_Node(self, node):
if len(self.access_path) == 0:
if not self.access_path:
name = u"(root)"
else:
tip = self.access_path[-1]
......@@ -29,6 +31,7 @@ class NodeTypeWriter(TreeVisitor):
self.visitchildren(node)
self._indents -= 1
def treetypes(root):
"""Returns a string representing the tree by class names.
There's a leading and trailing whitespace so that it can be
......@@ -38,6 +41,7 @@ def treetypes(root):
w.visit(root)
return u"\n".join([u""] + w.result + [u""])
class CythonTest(unittest.TestCase):
def setUp(self):
......@@ -110,6 +114,7 @@ class CythonTest(unittest.TestCase):
except:
self.fail(str(sys.exc_info()[1]))
class TransformTest(CythonTest):
"""
Utility base class for transform unit tests. It is based around constructing
......@@ -134,7 +139,6 @@ class TransformTest(CythonTest):
Plans: One could have a pxd dictionary parameter to run_pipeline.
"""
def run_pipeline(self, pipeline, pyx, pxds={}):
tree = self.fragment(pyx, pxds).root
# Run pipeline
......@@ -166,6 +170,7 @@ class TreeAssertVisitor(VisitorTransform):
visit_Node = VisitorTransform.recurse_to_children
def unpack_source_tree(tree_file, dir=None):
if dir is None:
dir = tempfile.mkdtemp()
......@@ -176,7 +181,8 @@ def unpack_source_tree(tree_file, dir=None):
lines = f.readlines()
finally:
f.close()
f = None
del f
try:
for line in lines:
if line[:5] == '#####':
filename = line.strip().strip('#').strip().replace('/', os.path.sep)
......@@ -184,13 +190,15 @@ def unpack_source_tree(tree_file, dir=None):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if cur_file is not None:
cur_file.close()
f, cur_file = cur_file, None
f.close()
cur_file = open(path, 'w')
elif cur_file is not None:
cur_file.write(line)
elif line.strip() and not line.lstrip().startswith('#'):
if line.strip() not in ('"""', "'''"):
header.append(line)
finally:
if cur_file is not None:
cur_file.close()
return dir, ''.join(header)
......@@ -222,14 +222,14 @@ class _XMLTestResult(_TextTestResult):
testsuite.setAttribute('name', str(suite_name))
testsuite.setAttribute('tests', str(len(tests)))
testsuite.setAttribute('time', '%.3f' % \
sum(map(lambda e: e.get_elapsed_time(), tests)))
testsuite.setAttribute('time', '%.3f' %
sum([e.get_elapsed_time() for e in tests]))
failures = filter(lambda e: e.outcome==_TestInfo.FAILURE, tests)
testsuite.setAttribute('failures', str(len(failures)))
failures = len([1 for e in tests if e.outcome == _TestInfo.FAILURE])
testsuite.setAttribute('failures', str(failures))
errors = filter(lambda e: e.outcome==_TestInfo.ERROR, tests)
testsuite.setAttribute('errors', str(len(errors)))
errors = len([1 for e in tests if e.outcome == _TestInfo.ERROR])
testsuite.setAttribute('errors', str(errors))
return testsuite
......
......@@ -593,8 +593,11 @@ __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
/* Parse all numbers in the format string */
while (*ts && *ts != ')') {
if (isspace(*ts))
continue;
// ignore space characters (not using isspace() due to C/C++ problem on MacOS-X)
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue;
default: break; /* not a 'break' in the loop */
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
......
......@@ -21,7 +21,6 @@
typedef struct {
PyCFunctionObject func;
int flags;
PyObject *func_dict;
PyObject *func_weakreflist;
PyObject *func_name;
......@@ -35,6 +34,7 @@ typedef struct {
/* Dynamic default args and annotations */
void *defaults;
int defaults_pyobjects;
int flags;
/* Defaults info */
PyObject *defaults_tuple; /* Const defaults tuple */
......
......@@ -108,7 +108,7 @@
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
......@@ -161,10 +161,17 @@
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#define __Pyx_PyUnicode_Concat(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : PyUnicode_Concat(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
......@@ -240,7 +247,7 @@
#define PyBoolObject PyLongObject
#endif
#if PY_VERSION_HEX < 0x03020000
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
......
......@@ -1029,7 +1029,8 @@ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
/////////////// PyObjectLookupSpecial.proto ///////////////
//@requires: PyObjectGetAttrStr
#if CYTHON_COMPILING_IN_CPYTHON
#if CYTHON_COMPILING_IN_CPYTHON && (PY_VERSION_HEX >= 0x03020000 || PY_MAJOR_VERSION < 3 && PY_VERSION_HEX >= 0x02070000)
// looks like calling _PyType_Lookup() isn't safe in Py<=2.6/3.1
static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name) {
PyObject *res;
PyTypeObject *tp = Py_TYPE(obj);
......@@ -1052,7 +1053,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObj
return res;
}
#else
#define __Pyx_PyObject_LookupSpecial(o,n) PyObject_GetAttr(o,n)
#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n)
#endif
/////////////// PyObjectGetAttrStr.proto ///////////////
......@@ -1093,6 +1094,7 @@ static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr
/////////////// PyObjectCallMethod.proto ///////////////
//@requires: PyObjectGetAttrStr
//@requires: PyObjectCall
//@substitute: naming
static PyObject* __Pyx_PyObject_CallMethodTuple(PyObject* obj, PyObject* method_name, PyObject* args) {
......@@ -1100,7 +1102,7 @@ static PyObject* __Pyx_PyObject_CallMethodTuple(PyObject* obj, PyObject* method_
if (unlikely(!args)) return NULL;
method = __Pyx_PyObject_GetAttrStr(obj, method_name);
if (unlikely(!method)) goto bad;
result = PyObject_Call(method, args, NULL);
result = __Pyx_PyObject_Call(method, args, NULL);
Py_DECREF(method);
bad:
Py_DECREF(args);
......@@ -1123,3 +1125,38 @@ bad:
static CYTHON_INLINE PyObject* __Pyx_tp_new_kwargs(PyObject* type_obj, PyObject* args, PyObject* kwargs) {
return (PyObject*) (((PyTypeObject*)type_obj)->tp_new((PyTypeObject*)type_obj, args, kwargs));
}
/////////////// PyObjectCall.proto ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/////////////// PyObjectCall ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
#if PY_VERSION_HEX >= 0x02060000
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
#endif
result = (*call)(func, arg, kw);
#if PY_VERSION_HEX >= 0x02060000
Py_LeaveRecursiveCall();
#endif
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
......@@ -47,10 +47,16 @@ static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) {
if (PyString_Check(v)) {
char *s = PyString_AsString(v);
Py_ssize_t len = PyString_Size(v);
if (len > 0 &&
isspace(Py_CHARMASK(s[len-1])) &&
s[len-1] != ' ')
if (len > 0) {
// append soft-space if necessary (not using isspace() due to C/C++ problem on MacOS-X)
switch (s[len-1]) {
case ' ': break;
case '\f': case '\r': case '\n': case '\t': case '\v':
PyFile_SoftSpace(f, 0);
break;
default: break;
}
}
}
}
if (newline) {
......
......@@ -206,11 +206,13 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
} else
#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
#if !CYTHON_COMPILING_IN_PYPY
#if PY_VERSION_HEX >= 0x02060000
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
#endif
{
char* result;
......
......@@ -398,7 +398,7 @@ Globally
One can set compiler directives through a special header comment at the top of the file, like this::
#!python
#cython: boundscheck=False
#cython: language_level=3, boundscheck=False
The comment must appear before any code (but can appear after other
comments or whitespace).
......@@ -426,7 +426,8 @@ statement, like this::
@cython.boundscheck(False) # turn off boundscheck for this function
def f():
...
with cython.boundscheck(True): # turn it temporarily on again for this block
# turn it temporarily on again for this block
with cython.boundscheck(True):
...
.. Warning:: These two methods of setting directives are **not**
......
......@@ -16,46 +16,46 @@ implicitly insert these encoding/decoding steps.
Python string types in Cython code
----------------------------------
Cython supports four Python string types: ``bytes``, ``str``,
``unicode`` and ``basestring``. The ``bytes`` and ``unicode`` types
are the specific types known from normal Python 2.x (named ``bytes``
and ``str`` in Python 3). Additionally, Cython also supports the
``bytearray`` type starting with Python 2.6. It behaves like the
``bytes`` type, except that it is mutable.
The ``str`` type is special in that it is the byte string in Python 2
Cython supports four Python string types: :obj:`bytes`, :obj:`str`,
:obj:`unicode` and :obj:`basestring`. The :obj:`bytes` and :obj:`unicode` types
are the specific types known from normal Python 2.x (named :obj:`bytes`
and :obj:`str` in Python 3). Additionally, Cython also supports the
:obj:`bytearray` type starting with Python 2.6. It behaves like the
:obj:`bytes` type, except that it is mutable.
The :obj:`str` type is special in that it is the byte string in Python 2
and the Unicode string in Python 3 (for Cython code compiled with
language level 2, i.e. the default). Meaning, it always corresponds
exactly with the type that the Python runtime itself calls ``str``.
Thus, in Python 2, both ``bytes`` and ``str`` represent the byte string
type, whereas in Python 3, both ``str`` and ``unicode`` represent the
exactly with the type that the Python runtime itself calls :obj:`str`.
Thus, in Python 2, both :obj:`bytes` and :obj:`str` represent the byte string
type, whereas in Python 3, both :obj:`str` and :obj:`unicode` represent the
Python Unicode string type. The switch is made at C compile time, the
Python version that is used to run Cython is not relevant.
When compiling Cython code with language level 3, the ``str`` type is
When compiling Cython code with language level 3, the :obj:`str` type is
identified with exactly the Unicode string type at Cython compile time,
i.e. it does not identify with ``bytes`` when running in Python 2.
i.e. it does not identify with :obj:`bytes` when running in Python 2.
Note that the ``str`` type is not compatible with the ``unicode``
Note that the :obj:`str` type is not compatible with the :obj:`unicode`
type in Python 2, i.e. you cannot assign a Unicode string to a variable
or argument that is typed ``str``. The attempt will result in either
a compile time error (if detectable) or a ``TypeError`` exception at
or argument that is typed :obj:`str`. The attempt will result in either
a compile time error (if detectable) or a :obj:`TypeError` exception at
runtime. You should therefore be careful when you statically type a
string variable in code that must be compatible with Python 2, as this
Python version allows a mix of byte strings and unicode strings for data
and users normally expect code to be able to work with both. Code that
only targets Python 3 can safely type variables and arguments as either
``bytes`` or ``unicode``.
:obj:`bytes` or :obj:`unicode`.
The ``basestring`` type represents both the types ``str`` and ``unicode``,
The :obj:`basestring` type represents both the types :obj:`str` and :obj:`unicode`,
i.e. all Python text string types in Python 2 and Python 3. This can be
used for typing text variables that normally contain Unicode text (at
least in Python 3) but must additionally accept the ``str`` type in
least in Python 3) but must additionally accept the :obj:`str` type in
Python 2 for backwards compatibility reasons. It is not compatible with
the ``bytes`` type. Its usage should be rare in normal Cython code as
the generic ``object`` type (i.e. untyped code) will normally be good
the :obj:`bytes` type. Its usage should be rare in normal Cython code as
the generic :obj:`object` type (i.e. untyped code) will normally be good
enough and has the additional advantage of supporting the assignment of
string subtypes. Support for the ``basestring`` type is new in Cython
string subtypes. Support for the :obj:`basestring` type is new in Cython
0.20.
......@@ -100,7 +100,7 @@ Python variable::
cdef char* c_string = c_call_returning_a_c_string()
cdef bytes py_string = c_string
A type cast to ``object`` or ``bytes`` will do the same thing::
A type cast to :obj:`object` or :obj:`bytes` will do the same thing::
py_string = <bytes> c_string
......@@ -163,13 +163,116 @@ however, when the C function stores the pointer for later use. Apart
from keeping a Python reference to the string object, no manual memory
management is required.
Starting with Cython 0.20, the ``bytearray`` type is supported and
coerces in the same way as the ``bytes`` type. However, when using it
Starting with Cython 0.20, the :obj:`bytearray` type is supported and
coerces in the same way as the :obj:`bytes` type. However, when using it
in a C context, special care must be taken not to grow or shrink the
object buffer after converting it to a C string pointer. These
modifications can change the internal buffer address, which will make
the pointer invalid.
Accepting strings from Python code
----------------------------------
The other side, receiving input from Python code, may appear simple
at first sight, as it only deals with objects. However, getting this
right without making the API too narrow or too unsafe may not be
entirely obvious.
In the case that the API only deals with byte strings, i.e. binary
data or encoded text, it is best not to type the input argument as
something like :obj:`bytes`, because that would restrict the allowed
input to exactly that type and exclude both subtypes and other kinds
of byte containers, e.g. :obj:`bytearray` objects or memory views.
Depending on how (and where) the data is being processed, it may be a
good idea to instead receive a 1-dimensional memory view, e.g.
::
def process_byte_data(unsigned char[:] data):
length = data.shape[0]
first_byte = data[0]
slice_view = data[1:-1]
...
Cython's memory views are described in more detail in
:doc:`../userguide/memoryviews`, but the above example already shows
most of the relevant functionality for 1-dimensional byte views. They
allow for efficient processing of arrays and accept anything that can
unpack itself into a byte buffer, without intermediate copying. The
processed content can finally be returned in the memory view itself
(or a slice of it), but it is often better to copy the data back into
a flat and simple :obj:`bytes` or :obj:`bytearray` object, especially
when only a small slice is returned. Since memoryviews do not copy the
data, they would otherwise keep the entire original buffer alive. The
general idea here is to be liberal with input by accepting any kind of
byte buffer, but strict with output by returning a simple, well adapted
object. This can simply be done as follows::
def process_byte_data(unsigned char[:] data):
# ... process the data
if return_all:
return bytes(data)
else:
# example for returning a slice
return bytes(data[5:35])
If the byte input is actually encoded text, and the further processing
should happen at the Unicode level, then the right thing to do is to
decode the input straight away. This is almost only a problem in Python
2.x, where Python code expects that it can pass a byte string (:obj:`str`)
with encoded text into a text API. Since this usually happens in more
than one place in the module's API, a helper function is almost always the
way to go, since it allows for easy adaptation of the input normalisation
process later.
This kind of input normalisation function will commonly look similar to
the following::
from cpython.version cimport PY_MAJOR_VERSION
cdef unicode _ustring(s):
if type(s) is unicode:
# fast path for most common case(s)
return <unicode>s
elif PY_MAJOR_VERSION < 3 and isinstance(s, bytes):
# only accept byte strings in Python 2.x, not in Py3
return (<bytes>s).decode('ascii')
elif isinstance(s, unicode):
# an evil cast to <unicode> might work here in some(!) cases,
# depending on what the further processing does. to be safe,
# we can always create a copy instead
return unicode(s)
else:
raise TypeError(...)
And should then be used like this::
def api_func(s):
text = _ustring(s)
...
Similarly, if the further processing happens at the byte level, but Unicode
string input should be accepted, then the following might work, if you are
using memory views::
# define a global name for whatever char type is used in the module
ctypedef unsigned char char_type
cdef char_type[:] _chars(s):
if isinstance(s, unicode):
# encode to the specific encoding used inside of the module
s = (<unicode>s).encode('utf8')
return s
In this case, you might want to additionally ensure that byte string
input really uses the correct encoding, e.g. if you require pure ASCII
input data, you can run over the buffer in a loop and check the highest
bit of each byte. This should then also be done in the input normalisation
function.
Dealing with "const"
--------------------
......@@ -224,6 +327,7 @@ In Cython 0.18, these standard declarations have been changed to
use the correct ``const`` modifier, so your code will automatically
benefit from the new ``const`` support if it uses them.
Decoding bytes to text
----------------------
......@@ -234,7 +338,7 @@ the C byte strings to Python Unicode strings on reception, and to
encode Python Unicode strings to C byte strings on the way out.
With a Python byte string object, you would normally just call the
``.decode()`` method to decode it into a Unicode string::
``bytes.decode()`` method to decode it into a Unicode string::
ustring = byte_string.decode('UTF-8')
......@@ -318,6 +422,7 @@ assignment. Later access to the invalidated pointer will read invalid
memory and likely result in a segfault. Cython will therefore refuse
to compile this code.
C++ strings
-----------
......@@ -375,7 +480,7 @@ There are two use cases where this is inconvenient. First, if all
C strings that are being processed (or the large majority) contain
text, automatic encoding and decoding from and to Python unicode
objects can reduce the code overhead a little. In this case, you
can set the ``c_string_type`` directive in your module to ``unicode``
can set the ``c_string_type`` directive in your module to :obj:`unicode`
and the ``c_string_encoding`` to the encoding that your C code uses,
for example::
......@@ -393,7 +498,7 @@ The second use case is when all C strings that are being processed
only contain ASCII encodable characters (e.g. numbers) and you want
your code to use the native legacy string type in Python 2 for them,
instead of always using Unicode. In this case, you can set the
string type to ``str``::
string type to :obj:`str`::
# cython: c_string_type=str, c_string_encoding=ascii
......@@ -472,15 +577,15 @@ whereas the following ``ISO-8859-15`` encoded source file will print
Note that the unicode literal ``u'abcö'`` is a correctly decoded four
character Unicode string in both cases, whereas the unprefixed Python
``str`` literal ``'abcö'`` will become a byte string in Python 2 (thus
:obj:`str` literal ``'abcö'`` will become a byte string in Python 2 (thus
having length 4 or 5 in the examples above), and a 4 character Unicode
string in Python 3. If you are not familiar with encodings, this may
not appear obvious at first read. See `CEP 108`_ for details.
As a rule of thumb, it is best to avoid unprefixed non-ASCII ``str``
As a rule of thumb, it is best to avoid unprefixed non-ASCII :obj:`str`
literals and to use unicode string literals for all text. Cython also
supports the ``__future__`` import ``unicode_literals`` that instructs
the parser to read all unprefixed ``str`` literals in a source file as
the parser to read all unprefixed :obj:`str` literals in a source file as
unicode string literals, just like Python 3.
.. _`CEP 108`: http://wiki.cython.org/enhancements/stringliterals
......@@ -522,7 +627,7 @@ explicitly, and the following will print ``A`` (or ``b'A'`` in Python
The explicit coercion works for any C integer type. Values outside of
the range of a :c:type:`char` or :c:type:`unsigned char` will raise an
``OverflowError`` at runtime. Coercion will also happen automatically
:obj:`OverflowError` at runtime. Coercion will also happen automatically
when assigning to a typed variable, e.g.::
cdef bytes py_byte_string
......@@ -544,10 +649,10 @@ The following will print 65::
cdef Py_UCS4 uchar_val = u'A'
print( <long>uchar_val )
Note that casting to a C ``long`` (or ``unsigned long``) will work
Note that casting to a C :c:type:`long` (or :c:type:`unsigned long`) will work
just fine, as the maximum code point value that a Unicode character
can have is 1114111 (``0x10FFFF``). On platforms with 32bit or more,
``int`` is just as good.
:c:type:`int` is just as good.
Narrow Unicode builds
......@@ -682,15 +787,15 @@ zero-terminated UTF-16 encoded :c:type:`wchar_t*` strings, so called
"wide strings".
By default, Windows builds of CPython define :c:type:`Py_UNICODE` as
a synonym for :c:type:`wchar_t`. This makes internal ``unicode``
a synonym for :c:type:`wchar_t`. This makes internal :obj:`unicode`
representation compatible with UTF-16 and allows for efficient zero-copy
conversions. This also means that Windows builds are always
`Narrow Unicode builds`_ with all the caveats.
To aid interoperation with Windows APIs, Cython 0.19 supports wide
strings (in the form of :c:type:`Py_UNICODE*`) and implicitly converts
them to and from ``unicode`` string objects. These conversions behave the
same way as they do for :c:type:`char*` and ``bytes`` as described in
them to and from :obj:`unicode` string objects. These conversions behave the
same way as they do for :c:type:`char*` and :obj:`bytes` as described in
`Passing byte strings`_.
In addition to automatic conversion, unicode literals that appear
......@@ -722,7 +827,7 @@ Here is an example of how one would call a Unicode API on Windows::
APIs deprecated and inefficient.
One consequence of CPython 3.3 changes is that :py:func:`len` of
``unicode`` strings is always measured in *code points* ("characters"),
:obj:`unicode` strings is always measured in *code points* ("characters"),
while Windows API expect the number of UTF-16 *code units*
(where each surrogate is counted individually). To always get the number
of code units, call :c:func:`PyUnicode_GetSize` directly.
......@@ -207,6 +207,11 @@ EXT_EXTRAS = {
'tag:trace': update_linetrace_extension,
}
def _is_py3_before_32(excluded, version):
return version[0] >= 3 and version < (3,2)
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
......@@ -248,10 +253,12 @@ VER_DEP_MODULES = {
'memoryview.numpy_memoryview',
'memoryview.memoryviewattrs',
'memoryview.memoryview',
'run.withstat_py',
]),
(2,7) : (operator.lt, lambda x: x in ['run.withstat_py', # multi context with statement
(2,7) : (operator.lt, lambda x: x in ['run.withstat_py27', # multi context with statement
'run.yield_inside_lambda',
'run.test_dictviews',
'run.pyclass_special_methods',
]),
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
......@@ -263,9 +270,14 @@ VER_DEP_MODULES = {
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2']),
'run.special_methods_T561_py2'
]),
(3,1): (_is_py3_before_32, lambda x: x in ['run.pyclass_special_methods',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
]),
(3,4,0,'beta',3) : (operator.le, lambda x: x in ['run.py34_signature',
]),
}
# files that should not be converted to Python 3 code with 2to3
......@@ -332,9 +344,18 @@ def parse_tags(filepath):
f.close()
return tags
list_unchanging_dir = memoize(lambda x: os.listdir(x))
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
......@@ -514,18 +535,18 @@ class TestBuilder(object):
elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
languages = list(languages)
languages.remove('cpp')
tests = [ self.build_test(test_class, path, workdir, module,
tests = [ self.build_test(test_class, path, workdir, module, tags,
language, expect_errors, warning_errors)
for language in languages ]
return tests
def build_test(self, test_class, path, workdir, module,
def build_test(self, test_class, path, workdir, module, tags,
language, expect_errors, warning_errors):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
return test_class(path, workdir, module,
return test_class(path, workdir, module, tags,
language=language,
expect_errors=expect_errors,
annotate=self.annotate,
......@@ -538,11 +559,12 @@ class TestBuilder(object):
warning_errors=warning_errors)
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, language='c',
def __init__(self, test_directory, workdir, module, tags, language='c',
expect_errors=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
......@@ -603,7 +625,7 @@ class CythonCompileTestCase(unittest.TestCase):
if not cleanup_c_files:
if (rmfile[-2:] in (".c", ".h") or
rmfile[-4:] == ".cpp" or
rmfile.endswith(".html")):
rmfile.endswith(".html") and rmfile.startswith(self.module)):
continue
if not cleanup_lib_files and (rmfile.endswith(".so") or rmfile.endswith(".dll")):
continue
......@@ -641,9 +663,17 @@ class CythonCompileTestCase(unittest.TestCase):
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
shutil.copy(os.path.join(test_directory, filename),
target_directory)
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
......@@ -708,12 +738,6 @@ class CythonCompileTestCase(unittest.TestCase):
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
original_source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
try:
tags = parse_tags(original_source)
except IOError:
tags = {}
cwd = os.getcwd()
os.chdir(workdir)
try:
......@@ -748,12 +772,13 @@ class CythonCompileTestCase(unittest.TestCase):
# Set the language now as the fixer might need it
extension.language = 'c++'
for matcher, fixer in EXT_EXTRAS.items():
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, tags):
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return
......@@ -891,9 +916,10 @@ def run_forked_test(result, run_func, test_name, fork=True):
child_id = os.fork()
if not child_id:
result_code = 0
output = None
try:
try:
tests = None
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
......@@ -901,6 +927,8 @@ def run_forked_test(result, run_func, test_name, fork=True):
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
......@@ -908,14 +936,20 @@ def run_forked_test(result, run_func, test_name, fork=True):
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
result_code = 1
output = open(result_file, 'wb')
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: output.close()
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
try:
if output is not None:
output.close()
except:
pass
os._exit(result_code)
try:
......@@ -1048,6 +1082,9 @@ class CythonPyregrTestCase(CythonRunTestCase):
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
......@@ -1677,13 +1714,13 @@ def main():
_, return_code = runtests(options, cmd_args, coverage)
print("ALL DONE")
try:
check_thread_termination(ignore_seen=False)
sys.exit(return_code)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
def runtests_callback(args):
......
......@@ -61,11 +61,10 @@ import a, b, c
######## fake_grep.py ########
import platform
import re
import sys
if platform == 'Windows':
if sys.platform == 'win32':
opt, pattern, file = sys.argv[1:]
assert opt == '-c'
count = 0
......
......@@ -175,6 +175,7 @@ def test_cdef_attribute():
>>> test_cdef_attribute()
Memoryview is not initialized
local variable 'myview' referenced before assignment
local variable 'myview' referenced before assignment
get_ext_obj called
Memoryview is not initialized
<MemoryView of 'array' object>
......@@ -195,8 +196,11 @@ def test_cdef_attribute():
else:
print "No UnboundLocalError was raised"
# uninitialized assignment is valid
cdef int[:] otherview = myview
cdef int[:] otherview
try:
otherview = myview
except UnboundLocalError, e:
print e.args[0]
try:
print get_ext_obj().mview
......
......@@ -406,7 +406,7 @@ cdef class DeallocateMe(object):
# Disabled! References cycles don't seem to be supported by NumPy
# @testcase
def acquire_release_cycle(obj):
"""
DISABLED_DOCSTRING = """
>>> a = np.arange(20, dtype=np.object)
>>> a[10] = DeallocateMe()
>>> acquire_release_cycle(a)
......
......@@ -20,6 +20,40 @@ def min3(a,b,c):
"""
return min(a,b,c)
@cython.test_assert_path_exists("//CondExprNode")
@cython.test_fail_if_path_exists("//SimpleCallNode")
def min3_list(a,b,c):
"""
>>> min3_list(1,2,3)
1
>>> min3_list(2,3,1)
1
>>> min3_list(2,1,3)
1
>>> min3_list(3,1,2)
1
>>> min3_list(3,2,1)
1
"""
return min([a,b,c])
@cython.test_assert_path_exists("//CondExprNode")
@cython.test_fail_if_path_exists("//SimpleCallNode")
def min3_tuple(a,b,c):
"""
>>> min3_tuple(1,2,3)
1
>>> min3_tuple(2,3,1)
1
>>> min3_tuple(2,1,3)
1
>>> min3_tuple(3,1,2)
1
>>> min3_tuple(3,2,1)
1
"""
return min((a,b,c))
@cython.test_assert_path_exists("//CondExprNode")
@cython.test_fail_if_path_exists("//SimpleCallNode")
def min3_typed(int a, int b, int c):
......
......@@ -199,7 +199,7 @@ def bytearray_decode_unbound_method(bytearray s, start=None, stop=None):
return bytearray.decode(s[start:stop], 'utf8')
def bytearray_append(bytearray b, char c, int i, object o):
def bytearray_append(bytearray b, signed char c, int i, object o):
"""
>>> b = bytearray(b'abc')
>>> b = bytearray_append(b, ord('x'), ord('y'), ord('z'))
......
......@@ -4,6 +4,56 @@
import sys
IS_PY3 = sys.version_info[0] >= 3
IS_PY34 = sys.version_info > (3, 4, 0, 'beta', 3)
def inspect_isroutine():
    """
    >>> inspect_isroutine()
    True
    """
    # A compiled Cython function must still register as a "routine".
    from inspect import isroutine
    return isroutine(inspect_isroutine)
def inspect_isfunction():
    """
    >>> inspect_isfunction()
    False
    False
    """
    import inspect
    import types
    # A compiled Cython function is not a plain Python function object,
    # so both the isinstance check and inspect.isfunction() report False.
    print(isinstance(inspect_isfunction, types.FunctionType))
    return inspect.isfunction(inspect_isfunction)
def inspect_isbuiltin():
    """
    >>> inspect_isbuiltin()
    False
    False
    """
    import inspect
    import types
    # Fixed copy-paste bug: the isinstance check previously inspected
    # inspect_isfunction instead of this function.  A compiled Cython
    # function is not a BuiltinFunctionType, so both lines print False.
    print(isinstance(inspect_isbuiltin, types.BuiltinFunctionType))
    return inspect.isbuiltin(inspect_isbuiltin)
def inspect_signature(a, b, c=123, *, d=234):
    """
    >>> sig = inspect_signature(1, 2)
    >>> if IS_PY34: list(sig.parameters)
    ... else: ['a', 'b', 'c', 'd']
    ['a', 'b', 'c', 'd']
    >>> if IS_PY34: sig.parameters['c'].default == 123
    ... else: True
    True
    >>> if IS_PY34: sig.parameters['d'].default == 234
    ... else: True
    True
    """
    import inspect
    # inspect.signature() only exists from Python 3.4 onwards.
    if not IS_PY34:
        return None
    return inspect.signature(inspect_signature)
def test_dict():
"""
......@@ -42,6 +92,19 @@ def test_doc():
'docstring'
"""
def test_hash():
    """
    >>> d = {test_hash: 123}
    >>> test_hash in d
    True
    >>> d[test_hash]
    123
    >>> hash(test_hash) == hash(test_hash)
    True
    """
    # Doctest-only stub: a Cython-compiled function must be hashable with a
    # stable hash so it can serve as a dict key.
def test_closure():
"""
>>> test_closure.func_closure is None
......
......@@ -4,6 +4,8 @@ import sys
if sys.version_info >= (3, 4):
def funcdoc(f):
if not f.__text_signature__:
return f.__doc__
doc = '%s%s' % (f.__name__, f.__text_signature__)
if f.__doc__:
if '\n' in f.__doc__:
......
__doc__ = u"""
>>> D
2
>>> D
2
>>> XYZ
5
"""
D = 1
include "testinclude.pxi"
include "includes/includefile.pxi"
# this file will be included
XYZ = 5
......@@ -286,6 +286,11 @@ def conditional_none(int a):
"""
return None if a in {1,2,3,4} else 1
@cython.test_assert_path_exists(
"//BoolBinopNode",
"//BoolBinopNode//PrimaryCmpNode"
)
@cython.test_fail_if_path_exists("//ListNode")
def n(a):
"""
>>> n('d *')
......
# cython: binding=True, language_level=3
# mode: run
# tag: cyfunction

import inspect

# Signature.from_function() was deprecated in Python 3.5 and removed in 3.11;
# inspect.signature() builds the same Signature for plain functions, so fall
# back to it where the old constructor no longer exists.
try:
    sig = inspect.Signature.from_function
except AttributeError:
    sig = inspect.signature
def signatures_match(f1, f2):
    # Doctests print nothing on success, so signature agreement maps to None;
    # a mismatch returns both signatures to make the doctest failure readable.
    s1, s2 = sig(f1), sig(f2)
    return None if s1 == s2 else (s1, s2)
# Doctest-only stubs: each compares the introspected signature of the
# Cython-compiled function against a pure-Python twin via signatures_match().
def b(a, b, c):
    """
    >>> def py_b(a, b, c): pass
    >>> signatures_match(b, py_b)
    """
def c(a, b, c=1):
    """
    >>> def py_c(a, b, c=1): pass
    >>> signatures_match(c, py_c)
    """
# Keyword-only argument with a default.
def d(a, b, *, c = 88):
    """
    >>> def py_d(a, b, *, c = 88): pass
    >>> signatures_match(d, py_d)
    """
# Default argument followed by **kwargs.
def e(a, b, c = 88, **kwds):
    """
    >>> def py_e(a, b, c = 88, **kwds): pass
    >>> signatures_match(e, py_e)
    """
# Mix of required and defaulted keyword-only arguments.
def f(a, b, *, c, d = 42):
    """
    >>> def py_f(a, b, *, c, d = 42): pass
    >>> signatures_match(f, py_f)
    """
# More signature-introspection stubs: keyword-only arguments combined with
# *args and **kwargs in various orders.
def g(a, b, *, c, d = 42, e = 17, f, **kwds):
    """
    >>> def py_g(a, b, *, c, d = 42, e = 17, f, **kwds): pass
    >>> signatures_match(g, py_g)
    """
def h(a, b, *args, c, d = 42, e = 17, f, **kwds):
    """
    >>> def py_h(a, b, *args, c, d = 42, e = 17, f, **kwds): pass
    >>> signatures_match(h, py_h)
    """
def k(a, b, c=1, *args, d = 42, e = 17, f, **kwds):
    """
    >>> def py_k(a, b, c=1, *args, d = 42, e = 17, f, **kwds): pass
    >>> signatures_match(k, py_k)
    """
# Keyword-only-only signatures, before and after rebinding module names.
def l(*, a, b, c = 88):
    """
    >>> def py_l(*, a, b, c = 88): pass
    >>> signatures_match(l, py_l)
    """
def m(a, *, b, c = 88):
    """
    >>> def py_m(a, *, b, c = 88): pass
    >>> signatures_match(m, py_m)
    """
# Shuffle the module-level names so the next definition's introspection
# cannot accidentally depend on the earlier bindings of a/b/c.
a, b, c = b, c, a
def n(a, *, b, c = 88):
    """
    >>> def py_n(a, *, b, c = 88): pass
    >>> signatures_match(n, py_n)
    """
......@@ -647,3 +647,27 @@ def self_lookup(a):
def bar(foo):
    # Type-inference exercise: qux aliases foo, and quux's type depends on
    # indexing foo with an attribute of that alias (self-referential lookup).
    qux = foo
    quux = foo[qux.baz]
# Module-level cdef enum used below to check that enum values are inferred
# with their enum type rather than as plain C ints.
cdef enum MyEnum:
    enum_x = 1
    enum_y = 2
cdef class InferInProperties:
    """
    >>> InferInProperties().x
    ('double', 'unicode object', 'MyEnum', 'MyEnum')
    """
    # C-level attribute typed with the cdef enum declared above.
    cdef MyEnum attr

    def __cinit__(self):
        self.attr = enum_x

    property x:
        def __get__(self):
            # Inference inside a property getter: float literal -> double,
            # unicode literal -> unicode object, and both the enum attribute
            # and the enum constant -> MyEnum (also after reassigning c = d).
            a = 1.0
            b = u'abc'
            c = self.attr
            d = enum_y
            c = d
            return typeof(a), typeof(b), typeof(c), typeof(d)
......@@ -26,6 +26,37 @@ def test_object_assmt():
assert typeof(a) == "Python object", typeof(a)
assert typeof(b) == "long", typeof(b)
class RAdd(object):
    """Absorbs reflected ``+``: records the left operand and returns itself."""
    other = None

    def __radd__(self, left):
        self._other = left
        return self

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, self._other)
def test_inplace_assignment():
    """
    >>> test_inplace_assignment()
    RAdd([1, 2, 3])
    """
    l = [1, 2, 3]
    # inferred type of l is list, but assignment result is object:
    # list.__add__ rejects RAdd, so RAdd.__radd__ runs and 'l' must be
    # allowed to hold the non-list result without a type error.
    l += RAdd()
    return l
def test_reassignment():
    """
    >>> test_reassignment()
    (1, 2, 3)
    """
    # The variable first holds a list and is then rebound to a tuple; the
    # inferred type must widen instead of rejecting the reassignment.
    seq = [1, 2, 3]
    seq = (1, 2, 3)
    return seq
def test_long_vs_double(cond):
"""
>>> test_long_vs_double(0)
......
......@@ -129,3 +129,51 @@ def test_class(cond):
class A:
x = 1
return A.x
def test_try_except_regression(c):
    """
    >>> test_try_except_regression(True)
    (123,)
    >>> test_try_except_regression(False)
    Traceback (most recent call last):
    ...
    UnboundLocalError: local variable 'a' referenced before assignment
    """
    # 'a' is only conditionally bound; both the try and except paths read it,
    # so the unbound case must raise UnboundLocalError rather than crash.
    if c:
        a = (123,)
    try:
        return a
    except:
        return a
def test_try_finally_regression(c):
    """
    >>> test_try_finally_regression(True)
    (123,)
    >>> test_try_finally_regression(False)
    Traceback (most recent call last):
    ...
    UnboundLocalError: local variable 'a' referenced before assignment
    """
    # Same conditional binding, but the second read happens in a finally
    # clause whose 'return' would otherwise swallow the pending exception.
    if c:
        a = (123,)
    try:
        return a
    finally:
        return a
def test_expression_calculation_order_bug(a):
    """
    >>> test_expression_calculation_order_bug(False)
    []
    >>> test_expression_calculation_order_bug(True)
    Traceback (most recent call last):
    ...
    UnboundLocalError: local variable 'b' referenced before assignment
    """
    # 'b' is bound only when 'a' is falsy; evaluating the short-circuit
    # expression must raise for the unbound name instead of reordering the
    # sub-expressions.
    if not a:
        b = []
    return (a or b) and (b or a)
import sys
def typename(t):
name = type(t).__name__
if sys.version_info < (2,5):
......@@ -9,9 +10,11 @@ def typename(t):
name = 'MyException'
return "<type '%s'>" % name
# Distinct exception type so typename() can recognise it on old Pythons.
class MyException(Exception):
    pass
class ContextManager(object):
def __init__(self, value, exit_ret = None):
self.value = value
......@@ -25,6 +28,7 @@ class ContextManager(object):
print("enter")
return self.value
def no_as():
"""
>>> no_as()
......@@ -35,6 +39,7 @@ def no_as():
with ContextManager("value"):
print("hello")
def basic():
"""
>>> basic()
......@@ -45,6 +50,7 @@ def basic():
with ContextManager("value") as x:
print(x)
def with_pass():
"""
>>> with_pass()
......@@ -54,6 +60,7 @@ def with_pass():
with ContextManager("value") as x:
pass
def with_return():
"""
>>> print(with_return())
......@@ -64,6 +71,7 @@ def with_return():
with ContextManager("value") as x:
return x
def with_break():
"""
>>> print(with_break())
......@@ -77,6 +85,7 @@ def with_break():
print("FAILED")
return c
def with_continue():
"""
>>> print(with_continue())
......@@ -94,6 +103,7 @@ def with_continue():
print("FAILED")
return c
def with_exception(exit_ret):
"""
>>> with_exception(None)
......@@ -113,6 +123,25 @@ def with_exception(exit_ret):
except:
print("outer except")
def with_real_lock():
"""
>>> with_real_lock()
about to acquire lock
holding lock
lock no longer held
"""
from threading import Lock
lock = Lock()
print("about to acquire lock")
with lock:
print("holding lock")
print("lock no longer held")
def functions_in_with():
"""
>>> f = functions_in_with()
......@@ -133,6 +162,7 @@ def functions_in_with():
print("outer except")
return f
def multitarget():
"""
>>> multitarget()
......@@ -143,6 +173,7 @@ def multitarget():
with ContextManager((1, 2, (3, (4, 5)))) as (a, b, (c, (d, e))):
print('%s %s %s %s %s' % (a, b, c, d, e))
def tupletarget():
"""
>>> tupletarget()
......@@ -153,39 +184,12 @@ def tupletarget():
with ContextManager((1, 2, (3, (4, 5)))) as t:
print(t)
def multimanager():
    """
    >>> multimanager()
    enter
    enter
    enter
    enter
    enter
    enter
    2
    value
    1 2 3 4 5
    nested
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    """
    # Several managers in one 'with' statement plus a nested one: enters run
    # left to right, exits in reverse, all with a None exception triple.
    with ContextManager(1), ContextManager(2) as x, ContextManager('value') as y,\
            ContextManager(3), ContextManager((1, 2, (3, (4, 5)))) as (a, b, (c, (d, e))):
        with ContextManager('nested') as nested:
            print(x)
            print(y)
            print('%s %s %s %s %s' % (a, b, c, d, e))
            print(nested)
class GetManager(object):
    # Factory whose method returns a fresh context manager; lets the tests
    # use an arbitrary call expression in a 'with' statement.
    def get(self, *args):
        return ContextManager(*args)
def manager_from_expression():
"""
>>> manager_from_expression()
......@@ -201,94 +205,3 @@ def manager_from_expression():
g = GetManager()
with g.get(2) as x:
print(x)
# Tests borrowed from pyregr test_with.py,
# modified to follow the constraints of Cython.
import unittest
class Dummy(object):
    """Context manager that records protocol calls for the nested-with tests."""

    def __init__(self, value=None, gobble=False):
        # By default, __enter__ yields the manager object itself.
        self.value = self if value is None else value
        # When gobble is set, __exit__ suppresses the active exception.
        self.gobble = gobble
        self.enter_called = False
        self.exit_called = False

    def __enter__(self):
        self.enter_called = True
        return self.value

    def __exit__(self, *exc_info):
        self.exit_called = True
        # Keep the exception triple around for inspection by the tests.
        self.exc_info = exc_info
        if self.gobble:
            return True
class InitRaises(object):
    # Fails during construction, before the context-manager protocol starts.
    def __init__(self):
        raise RuntimeError()
class EnterRaises(object):
    # Fails on __enter__; the with-block body must never run.
    def __enter__(self):
        raise RuntimeError()

    def __exit__(self, *exc_info):
        pass
class ExitRaises(object):
    # Enters cleanly but fails during cleanup.
    def __enter__(self):
        pass

    def __exit__(self, *exc_info):
        raise RuntimeError()
class NestedWith(unittest.TestCase):
    """
    >>> NestedWith().runTest()
    """
    # Exercises multiple context managers in a single 'with' statement
    # (borrowed from CPython's test_with.py); driven via the doctest above
    # rather than a unittest runner.

    def runTest(self):
        self.testNoExceptions()
        self.testExceptionInExprList()
        self.testExceptionInEnter()
        self.testExceptionInExit()
        self.testEnterReturnsTuple()

    def testNoExceptions(self):
        # Both managers are entered and, after the block, exited.
        with Dummy() as a, Dummy() as b:
            self.assertTrue(a.enter_called)
            self.assertTrue(b.enter_called)
        self.assertTrue(a.exit_called)
        self.assertTrue(b.exit_called)

    def testExceptionInExprList(self):
        # The second manager's constructor raises: the first one must still
        # be exited.
        try:
            with Dummy() as a, InitRaises():
                pass
        except:
            pass
        self.assertTrue(a.enter_called)
        self.assertTrue(a.exit_called)

    def testExceptionInEnter(self):
        # A raising __enter__ must skip the body, re-raise, and still unwind
        # the already-entered first manager.
        try:
            with Dummy() as a, EnterRaises():
                self.fail('body of bad with executed')
        except RuntimeError:
            pass
        else:
            self.fail('RuntimeError not reraised')
        self.assertTrue(a.enter_called)
        self.assertTrue(a.exit_called)

    def testExceptionInExit(self):
        # The inner manager raises on exit; the outer gobbling manager must
        # see and swallow that exception after the body ran.
        body_executed = False
        with Dummy(gobble=True) as a, ExitRaises():
            body_executed = True
        self.assertTrue(a.enter_called)
        self.assertTrue(a.exit_called)
        self.assertTrue(body_executed)
        self.assertNotEqual(a.exc_info[0], None)

    def testEnterReturnsTuple(self):
        # Tuple targets unpack the values returned by each __enter__.
        with Dummy(value=(1,2)) as (a1, a2), \
                Dummy(value=(10, 20)) as (b1, b2):
            self.assertEqual(1, a1)
            self.assertEqual(2, a2)
            self.assertEqual(10, b1)
            self.assertEqual(20, b2)
import sys
def typename(t):
    """Return a "<type 'X'>" style label for the type of *t*."""
    name = type(t).__name__
    if sys.version_info >= (2, 5):
        return "<type '%s'>" % name
    # Python < 2.5 reported old-style exception classes as 'classobj' and
    # their instances as 'instance'; map those back to stable names.
    if name == 'classobj' and issubclass(t, MyException):
        name = 'type'
    elif name == 'instance' and isinstance(t, MyException):
        name = 'MyException'
    return "<type '%s'>" % name
# Distinct exception type so typename() can recognise it on old Pythons.
class MyException(Exception):
    pass
class ContextManager(object):
    """Context manager that prints enter/exit events for the doctests."""

    def __init__(self, value, exit_ret=None):
        self.value = value
        self.exit_ret = exit_ret

    def __enter__(self):
        print("enter")
        return self.value

    def __exit__(self, a, b, tb):
        # Report the types of the exception triple; exit_ret decides whether
        # a pending exception is suppressed.
        print("exit %s %s %s" % (typename(a), typename(b), typename(tb)))
        return self.exit_ret
def multimanager():
    """
    >>> multimanager()
    enter
    enter
    enter
    enter
    enter
    enter
    2
    value
    1 2 3 4 5
    nested
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    """
    # Several managers in one 'with' statement plus a nested one: enters run
    # left to right, exits in reverse, all with a None exception triple.
    with ContextManager(1), ContextManager(2) as x, ContextManager('value') as y,\
            ContextManager(3), ContextManager((1, 2, (3, (4, 5)))) as (a, b, (c, (d, e))):
        with ContextManager('nested') as nested:
            print(x)
            print(y)
            print('%s %s %s %s %s' % (a, b, c, d, e))
            print(nested)
class GetManager(object):
    # Factory whose method returns a fresh context manager; lets the tests
    # use an arbitrary call expression in a 'with' statement.
    def get(self, *args):
        return ContextManager(*args)
def manager_from_expression():
    """
    >>> manager_from_expression()
    enter
    1
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    enter
    2
    exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
    """
    # The context manager may come from a chained call expression or from a
    # method on a stored object; both forms must behave identically.
    with GetManager().get(1) as x:
        print(x)
    g = GetManager()
    with g.get(2) as x:
        print(x)
# Tests borrowed from pyregr test_with.py,
# modified to follow the constraints of Cython.
import unittest
class Dummy(object):
    # Context manager that records protocol calls for the nested-with tests.
    def __init__(self, value=None, gobble=False):
        # By default, __enter__ yields the manager object itself.
        if value is None:
            value = self
        self.value = value
        # When gobble is set, __exit__ suppresses the active exception.
        self.gobble = gobble
        self.enter_called = False
        self.exit_called = False

    def __enter__(self):
        self.enter_called = True
        return self.value

    def __exit__(self, *exc_info):
        self.exit_called = True
        # Keep the exception triple around for inspection by the tests.
        self.exc_info = exc_info
        if self.gobble:
            return True
class InitRaises(object):
    # Fails during construction, before the context-manager protocol starts.
    def __init__(self): raise RuntimeError()
class EnterRaises(object):
    # Fails on __enter__; the with-block body must never run.
    def __enter__(self): raise RuntimeError()
    def __exit__(self, *exc_info): pass
class ExitRaises(object):
    # Enters cleanly but fails during cleanup.
    def __enter__(self): pass
    def __exit__(self, *exc_info): raise RuntimeError()
class NestedWith(unittest.TestCase):
    """
    >>> NestedWith().runTest()
    """
    # Exercises multiple context managers in a single 'with' statement
    # (borrowed from CPython's test_with.py); driven via the doctest above
    # rather than a unittest runner.

    def runTest(self):
        self.testNoExceptions()
        self.testExceptionInExprList()
        self.testExceptionInEnter()
        self.testExceptionInExit()
        self.testEnterReturnsTuple()

    def testNoExceptions(self):
        # Both managers are entered and, after the block, exited.
        with Dummy() as a, Dummy() as b:
            self.assertTrue(a.enter_called)
            self.assertTrue(b.enter_called)
        self.assertTrue(a.exit_called)
        self.assertTrue(b.exit_called)

    def testExceptionInExprList(self):
        # The second manager's constructor raises: the first one must still
        # be exited.
        try:
            with Dummy() as a, InitRaises():
                pass
        except:
            pass
        self.assertTrue(a.enter_called)
        self.assertTrue(a.exit_called)

    def testExceptionInEnter(self):
        # A raising __enter__ must skip the body, re-raise, and still unwind
        # the already-entered first manager.
        try:
            with Dummy() as a, EnterRaises():
                self.fail('body of bad with executed')
        except RuntimeError:
            pass
        else:
            self.fail('RuntimeError not reraised')
        self.assertTrue(a.enter_called)
        self.assertTrue(a.exit_called)

    def testExceptionInExit(self):
        # The inner manager raises on exit; the outer gobbling manager must
        # see and swallow that exception after the body ran.
        body_executed = False
        with Dummy(gobble=True) as a, ExitRaises():
            body_executed = True
        self.assertTrue(a.enter_called)
        self.assertTrue(a.exit_called)
        self.assertTrue(body_executed)
        self.assertNotEqual(a.exc_info[0], None)

    def testEnterReturnsTuple(self):
        # Tuple targets unpack the values returned by each __enter__.
        with Dummy(value=(1,2)) as (a1, a2), \
                Dummy(value=(10, 20)) as (b1, b2):
            self.assertEqual(1, a1)
            self.assertEqual(2, a2)
            self.assertEqual(10, b1)
            self.assertEqual(20, b2)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment