pax_global_header 0000666 0000000 0000000 00000000064 13766135517 0014530 g ustar 00root root 0000000 0000000 52 comment=5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/ 0000775 0000000 0000000 00000000000 13766135517 0023253 5 ustar 00root root 0000000 0000000 cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/ 0000775 0000000 0000000 00000000000 13766135517 0024517 5 ustar 00root root 0000000 0000000 cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/ 0000775 0000000 0000000 00000000000 13766135517 0026271 5 ustar 00root root 0000000 0000000 AnalysedTreeTransforms.py 0000664 0000000 0000000 00000007362 13766135517 0033233 0 ustar 00root root 0000000 0000000 cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler from __future__ import absolute_import
from .Visitor import ScopeTrackingTransform
from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode
from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode
from .PyrexTypes import py_object_type
from .StringEncoding import EncodedString
from . import Symtab
class AutoTestDictTransform(ScopeTrackingTransform):
    """Build a module-level ``__test__`` dict from docstrings, implementing
    the 'autotestdict' compiler directive."""

    # Special method names that are never collected for cclass scopes
    # (presumably because doctest cannot invoke them directly on an
    # extension type -- TODO confirm rationale against upstream docs).
    blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
                 '__nonzero__', '__bool__',
                 '__len__', '__contains__']
def visit_ModuleNode(self, node):
    """Set up test collection for the module and append the final
    '__test__ = {...}' assignment to the module body.

    The dict's key/value pairs list is shared with self.tests, so tests
    added while visiting children appear in the assignment appended last.
    """
    if node.is_pxd:
        # .pxd files have no runtime body; nothing to collect
        return node
    self.scope_type = 'module'
    self.scope_node = node

    if not self.current_directives['autotestdict']:
        return node
    self.all_docstrings = self.current_directives['autotestdict.all']
    self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']

    assert isinstance(node.body, StatListNode)

    # First see if __test__ is already created
    if u'__test__' in node.scope.entries:
        # Do nothing -- a user-defined __test__ takes precedence
        return node

    pos = node.pos

    self.tests = []
    self.testspos = node.pos

    test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'),
                                             py_object_type,
                                             pos,
                                             visibility='public')
    create_test_dict_assignment = SingleAssignmentNode(pos,
        lhs=NameNode(pos, name=EncodedString(u'__test__'),
                     entry=test_dict_entry),
        rhs=DictNode(pos, key_value_pairs=self.tests))
    # visiting children fills self.tests before the assignment is appended
    self.visitchildren(node)
    node.body.stats.append(create_test_dict_assignment)
    return node
def add_test(self, testpos, path, doctest):
    """Queue *doctest* (found at *testpos*) under the key '<path> (line N)'."""
    pos = self.testspos
    key_text = u'%s (line %d)' % (path, testpos[1])
    item = DictItemNode(pos,
                        key=UnicodeNode(pos, value=EncodedString(key_text)),
                        value=UnicodeNode(pos, value=doctest))
    self.tests.append(item)
def visit_ExprNode(self, node):
    # Expressions cannot contain function definitions, and lambda expressions
    # do not have a docstring -- no need to descend further.
    return node
def visit_FuncDefNode(self, node):
    """Collect the docstring of a function/method into the __test__ dict.

    Skips functions without a docstring (or fused-function wrappers),
    cdef functions unless cdef docstrings are enabled, docstrings without
    doctest markers unless 'autotestdict.all' is set, and blacklisted
    special methods on cclass scopes.

    Fix: removed two dead locals present in the original
    ('pos = self.testspos' and 'property_method_name') which were assigned
    but never read.
    """
    if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
        return node
    if not self.cdef_docstrings:
        if isinstance(node, CFuncDefNode) and not node.py_func:
            return node
    if not self.all_docstrings and '>>>' not in node.doc:
        return node

    if self.scope_type == 'module':
        path = node.entry.name
    elif self.scope_type in ('pyclass', 'cclass'):
        if isinstance(node, CFuncDefNode):
            # cpdef functions: prefer the Python wrapper's name
            if node.py_func is not None:
                name = node.py_func.name
            else:
                name = node.entry.name
        else:
            name = node.name
        if self.scope_type == 'cclass' and name in self.blacklist:
            return node
        if self.scope_type == 'pyclass':
            class_name = self.scope_node.name
        else:
            class_name = self.scope_node.class_name
        if isinstance(node.entry.scope, Symtab.PropertyScope):
            # qualify the path with the property's name as well
            path = "%s.%s.%s" % (class_name, node.entry.scope.name,
                                 node.entry.name)
        else:
            path = "%s.%s" % (class_name, node.entry.name)
    else:
        assert False
    self.add_test(node.pos, path, node.doc)
    return node
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Annotate.py 0000664 0000000 0000000 00000033431 13766135517 0030420 0 ustar 00root root 0000000 0000000 # Note: Work in progress
from __future__ import absolute_import
import os
import os.path
import re
import codecs
import textwrap
from datetime import datetime
from functools import partial
from collections import defaultdict
from xml.sax.saxutils import escape as html_escape
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # does not support writing 'str' in Py2
from . import Version
from .Code import CCodeWriter
from .. import Utils
class AnnotationCCodeWriter(CCodeWriter):
    """CCodeWriter that additionally records which C code was generated for
    which source line, for rendering an annotated HTML view of the module."""

    # also used as marker for detection of complete code emission in tests
    COMPLETE_CODE_TITLE = "Complete cythonized code"
def __init__(self, create_from=None, buffer=None, copy_formatting=True, show_entire_c_code=False):
    """Create a fresh writer, or an insertion point sharing the annotation
    database of *create_from*."""
    CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting)
    self.show_entire_c_code = show_entire_c_code
    if create_from is not None:
        # insertion point: keep references to the parent's shared database
        self.annotation_buffer = create_from.annotation_buffer
        self.annotations = create_from.annotations
        self.code = create_from.code
        self.scopes = create_from.scopes
        self.last_annotated_pos = create_from.last_annotated_pos
    else:
        self.annotation_buffer = StringIO()
        self.last_annotated_pos = None
        # annotations[filename][line] -> [(column, AnnotationItem)*]
        self.annotations = defaultdict(partial(defaultdict, list))
        # code[filename][line] -> str
        self.code = defaultdict(partial(defaultdict, str))
        # scopes[filename][line] -> set(scopes)
        self.scopes = defaultdict(partial(defaultdict, set))
def create_new(self, create_from, buffer, copy_formatting):
    # Factory hook used by CCodeWriter when it creates insertion points.
    return AnnotationCCodeWriter(create_from, buffer, copy_formatting)

def write(self, s):
    # Mirror everything written to the C file into the annotation buffer.
    CCodeWriter.write(self, s)
    self.annotation_buffer.write(s)

def mark_pos(self, pos, trace=True):
    # Flush the C code collected since the previous marker into the
    # per-line store, then start collecting for *pos*.
    if pos is not None:
        CCodeWriter.mark_pos(self, pos, trace)
        if self.funcstate and self.funcstate.scope:
            # lambdas and genexprs can result in multiple scopes per line => keep them in a set
            self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope)
    if self.last_annotated_pos:
        source_desc, line, _ = self.last_annotated_pos
        pos_code = self.code[source_desc.filename]
        # everything buffered so far belongs to the previous position
        pos_code[line] += self.annotation_buffer.getvalue()
    self.annotation_buffer = StringIO()
    self.last_annotated_pos = pos
def annotate(self, pos, item):
    """Record *item* at source position *pos* (source_desc, line, column)."""
    source_desc, line, column = pos
    self.annotations[source_desc.filename][line].append((column, item))
def _css(self):
    """css template will later allow to choose a colormap"""
    rules = [self._css_template]
    # generate the score colormap: higher score -> stronger yellow
    for score in range(255):
        shade = u"FFFF%02x" % int(255/(1+score/10.0))
        rules.append('.cython.score-%d {background-color: #%s;}' % (score, shade))
    try:
        from pygments.formatters import HtmlFormatter
    except ImportError:
        pass
    else:
        rules.append(HtmlFormatter().get_style_defs('.cython'))
    return '\n'.join(rules)
# Base stylesheet; the per-score background colours are appended in _css().
_css_template = textwrap.dedent("""
body.cython { font-family: courier; font-size: 12; }
.cython.tag { }
.cython.line { margin: 0em }
.cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; }
.cython.line .run { background-color: #B0FFB0; }
.cython.line .mis { background-color: #FFB0B0; }
.cython.code.run { border-left: 8px solid #B0FFB0; }
.cython.code.mis { border-left: 8px solid #FFB0B0; }
.cython.code .py_c_api { color: red; }
.cython.code .py_macro_api { color: #FF7000; }
.cython.code .pyx_c_api { color: #FF3000; }
.cython.code .pyx_macro_api { color: #FF7000; }
.cython.code .refnanny { color: #FFA000; }
.cython.code .trace { color: #FFA000; }
.cython.code .error_goto { color: #FFA000; }
.cython.code .coerce { color: #008000; border: 1px dotted #008000 }
.cython.code .py_attr { color: #FF0000; font-weight: bold; }
.cython.code .c_attr { color: #0000FF; }
.cython.code .py_call { color: #FF0000; font-weight: bold; }
.cython.code .c_call { color: #0000FF; }
""")

# on-click toggle function to show/hide C source code
_onclick_attr = ' onclick="{0}"'.format((
    "(function(s){"
    " s.display = s.display === 'block' ? 'none' : 'block'"
    "})(this.nextElementSibling.style)"
).replace(' ', '')  # poor dev's JS minification
)
def save_annotation(self, source_filename, target_filename, coverage_xml=None):
    """Write the annotated HTML for *source_filename* next to *target_filename*."""
    with Utils.open_source_file(source_filename) as f:
        source_code = f.read()
    generated_code = self.code.get(source_filename, {})
    c_file = Utils.decode_filename(os.path.basename(target_filename))
    html_filename = os.path.splitext(target_filename)[0] + ".html"

    with codecs.open(html_filename, "w", encoding="UTF-8") as out_buffer:
        out_buffer.write(self._save_annotation(source_code, generated_code, c_file, source_filename, coverage_xml))
# NOTE(review): the HTML template literals in the two methods below are
# mangled -- markup tags appear to have been stripped from the string
# constants during extraction, leaving some literals broken/unterminated.
# The code is kept byte-identical here; restore the templates from the
# upstream Cython sources before use.
def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None):
coverage_info = ''
if coverage_timestamp:
coverage_info = u' with coverage data from {timestamp}'.format(
timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000))
outlist = [
textwrap.dedent(u'''\
Cython: (unknown)
Generated by Cython {watermark}{more_info}
Yellow lines hint at Python interaction.
Click on a line that starts with a "+
" to see the C code that Cython generated for it.
''').format(css=self._css(), watermark=Version.watermark,
filename=os.path.basename(source_filename) if source_filename else '',
more_info=coverage_info)
]
if c_file:
outlist.append(u'Raw output: %s
\n' % (c_file, c_file))
return outlist
def _save_annotation_footer(self):
return (u'\n',)
def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None):
    """
    Assemble the complete annotation HTML page.

    code : original cython source code, as one string
    generated_code : generated C code keyed by line number in original file
    c_file : filename in which the c_code has been written
    coverage_xml : optional parsed coverage.xml element tree
    """
    if coverage_xml is not None and source_filename:
        coverage_timestamp = coverage_xml.get('timestamp', '').strip()
        covered_lines = self._get_line_coverage(coverage_xml, source_filename)
    else:
        coverage_timestamp = covered_lines = None
    annotation_items = dict(self.annotations[source_filename])
    scopes = dict(self.scopes[source_filename])

    parts = []
    parts.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp))
    parts.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines))
    parts.extend(self._save_annotation_footer())
    return ''.join(parts)
def _get_line_coverage(self, coverage_xml, source_filename):
    """Find the coverage entry for *source_filename*; map line number -> hits."""
    best_match = None
    for entry in coverage_xml.iterfind('.//class'):
        filename = entry.get('filename')
        if not filename:
            continue
        if filename == source_filename or os.path.abspath(filename) == source_filename:
            best_match = entry
            break
        elif source_filename.endswith(filename):
            # suffix match only -- keep scanning for an exact match
            best_match = entry
    if best_match is None:
        return None
    return dict(
        (int(line.get('number')), int(line.get('hits')))
        for line in best_match.iterfind('lines/line')
    )
def _htmlify_code(self, code, language):
    """Return *code* highlighted as HTML via Pygments, or plain-escaped text
    when Pygments is unavailable or the language is unknown."""
    try:
        from pygments import highlight
        from pygments.lexers import CythonLexer, CppLexer
        from pygments.formatters import HtmlFormatter
    except ImportError:
        # no Pygments, just escape the code
        return html_escape(code)

    if language == "cython":
        lexer = CythonLexer(stripnl=False, stripall=False)
    elif language == "c/cpp":
        lexer = CppLexer(stripnl=False, stripall=False)
    else:
        # unknown language, use fallback
        return html_escape(code)

    return highlight(code, lexer, HtmlFormatter(nowrap=True))
# NOTE(review): this method's HTML string literals are mangled -- markup
# appears to have been stripped (several literals are left unterminated,
# e.g. the 'return u"' below and the outlist.append(u' ... blocks).  Code
# kept byte-identical; restore from upstream Cython before use.
def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None):
outlist = [u'']
pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n'
new_calls_map = dict(
(name, 0) for name in
'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split()
).copy
self.mark_pos(None)
def annotate(match):
group_name = match.lastgroup
calls[group_name] += 1
return u"
%s" % (
group_name, match.group(group_name))
lines = self._htmlify_code(cython_code, "cython").splitlines()
lineno_width = len(str(len(lines)))
if not covered_lines:
covered_lines = None
for k, line in enumerate(lines, 1):
try:
c_code = generated_code[k]
except KeyError:
c_code = ''
else:
c_code = _replace_pos_comment(pos_comment_marker, c_code)
if c_code.startswith(pos_comment_marker):
c_code = c_code[len(pos_comment_marker):]
c_code = html_escape(c_code)
calls = new_calls_map()
c_code = _parse_code(annotate, c_code)
score = (5 * calls['py_c_api'] + 2 * calls['pyx_c_api'] +
calls['py_macro_api'] + calls['pyx_macro_api'])
if c_code:
onclick = self._onclick_attr
expandsymbol = '+'
else:
onclick = ''
expandsymbol = ' '
covered = ''
if covered_lines is not None and k in covered_lines:
hits = covered_lines[k]
if hits is not None:
covered = 'run' if hits else 'mis'
outlist.append(
u'
'
# generate line number with expand symbol in front,
# and the right number of digit
u'{expandsymbol}{line:0{lineno_width}d}: {code}
\n'.format(
score=score,
expandsymbol=expandsymbol,
covered=covered,
lineno_width=lineno_width,
line=k,
code=line.rstrip(),
onclick=onclick,
))
if c_code:
outlist.append(u"
{code}
".format(
score=score, covered=covered, code=c_code))
outlist.append(u"
")
# now the whole c-code if needed:
if self.show_entire_c_code:
outlist.append(u'')
onclick_title = u"
+ {title}
\n"
outlist.append(onclick_title.format(
onclick=self._onclick_attr,
title=AnnotationCCodeWriter.COMPLETE_CODE_TITLE,
))
complete_code_as_html = self._htmlify_code(self.buffer.getvalue(), "c/cpp")
outlist.append(u"
{code}
".format(code=complete_code_as_html))
outlist.append(u"
")
return outlist
# NOTE(review): the named-group syntax below is corrupted -- every
# '(?P' is missing its '<name>' part (angle-bracketed text was stripped
# during extraction), so these patterns will not compile as-is.  The group
# names used by _save_annotation_body/annotate() are: refnanny, trace,
# pyx_macro_api, pyx_c_api, py_macro_api, py_c_api, error_goto.
# Kept byte-identical; restore from upstream Cython before use.
_parse_code = re.compile((
br'(?P__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|'
br'(?P__Pyx_Trace[A-Za-z]+)|'
br'(?:'
br'(?P__Pyx_[A-Z][A-Z_]+)|'
br'(?P(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|'
br'(?PPy[A-Z][a-z]+_[A-Z][A-Z_]+)|'
br'(?PPy[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)'
br')(?=\()|'  # look-ahead to exclude subsequent '(' from replacement
br'(?P(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))'
).decode('ascii')).sub

# Bound .sub method matching Cython's generated code-line marker comments.
_replace_pos_comment = re.compile(
# this matches what Cython generates as code line marker comment
br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'),
re.M
).sub

# NOTE(review): the format strings in start()/end() look mangled too --
# start() feeds three arguments into a single '%s' (markup stripped).
class AnnotationItem(object):
def __init__(self, style, text, tag="", size=0):
self.style = style
self.text = text
self.tag = tag
self.size = size
def start(self):
return u"%s" % (self.style, self.text, self.tag)
def end(self):
return self.size, u""
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/AutoDocTransforms.py0000664 0000000 0000000 00000020705 13766135517 0032264 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import, print_function
from .Visitor import CythonTransform
from .StringEncoding import EncodedString
from . import Options
from . import PyrexTypes
from ..CodeWriter import ExpressionWriter
from .Errors import warning
class AnnotationWriter(ExpressionWriter):
    """
    A Cython code writer for Python expressions in argument/variable annotations.
    """
    def __init__(self, description=None):
        """description is optional. If specified it is used in
        warning messages for the nodes that don't convert to string properly.
        If not specified then no messages are generated.
        """
        ExpressionWriter.__init__(self)
        self.description = description
def visit_Node(self, node):
    # Fallback for nodes without a usable string representation.
    # NOTE(review): the placeholder literal below looks truncated (likely
    # angle-bracketed markup stripped during extraction) -- confirm
    # against upstream before relying on the emitted text.
    self.put(u"??>")
    if self.description:
        warning(node.pos,
                "Failed to convert code to string representation in {0}".format(
                    self.description), level=1)

def visit_LambdaNode(self, node):
    # XXX Should we do better?
    # NOTE(review): the empty placeholder below is suspicious (probably a
    # "<lambda>"-style marker whose bracketed text was stripped) -- verify.
    self.put("")
    if self.description:
        warning(node.pos,
                "Failed to convert lambda to string representation in {0}".format(
                    self.description), level=1)

def visit_AnnotationNode(self, node):
    # Annotations carry their source text; emit it verbatim.
    self.put(node.string.unicode_value)
class EmbedSignature(CythonTransform):
    """Prepend the call signature to function/method/property docstrings,
    implementing the 'embedsignature' directive."""

    def __init__(self, context):
        super(EmbedSignature, self).__init__(context)
        self.class_name = None   # name of the class currently being visited
        self.class_node = None   # its node (needed for __init__ handling)
def _fmt_expr(self, node):
    """Render an expression node into its source-code string form."""
    writer = AnnotationWriter()
    return writer.write(node)
def _fmt_arg(self, arg):
    """Format one argument as 'name[: annotation][ = default]' (or with its
    C declaration when the type is not a plain Python object)."""
    if arg.type is PyrexTypes.py_object_type or arg.is_self_arg:
        doc = arg.name
    else:
        doc = arg.type.declaration_code(arg.name, for_display=1)

    if arg.annotation:
        annotation = self._fmt_expr(arg.annotation)
        doc = doc + (': %s' % annotation)
        if arg.default:
            default = self._fmt_expr(arg.default)
            # spaces around '=' when an annotation is present (PEP 8)
            doc = doc + (' = %s' % default)
    elif arg.default:
        default = self._fmt_expr(arg.default)
        doc = doc + ('=%s' % default)
    return doc

def _fmt_star_arg(self, arg):
    """Format a *args/**kwargs argument (without the star prefix)."""
    arg_doc = arg.name
    if arg.annotation:
        annotation = self._fmt_expr(arg.annotation)
        arg_doc = arg_doc + (': %s' % annotation)
    return arg_doc
def _fmt_arglist(self, args,
                 npoargs=0, npargs=0, pargs=None,
                 nkargs=0, kargs=None,
                 hide_self=False):
    """Format the argument list, inserting '*args'/'*' and '/' markers at
    the positions implied by the positional-only/keyword-only counts."""
    arglist = []
    for arg in args:
        if not hide_self or not arg.entry.is_self_arg:
            arg_doc = self._fmt_arg(arg)
            arglist.append(arg_doc)
    if pargs:
        arg_doc = self._fmt_star_arg(pargs)
        arglist.insert(npargs + npoargs, '*%s' % arg_doc)
    elif nkargs:
        # keyword-only args without *args need a bare '*' separator
        arglist.insert(npargs + npoargs, '*')
    if npoargs:
        # positional-only args are terminated by '/'
        arglist.insert(npoargs, '/')
    if kargs:
        arg_doc = self._fmt_star_arg(kargs)
        arglist.append('**%s' % arg_doc)
    return arglist
def _fmt_ret_type(self, ret):
    """Return the display form of a return type; None for plain 'object'."""
    if ret is PyrexTypes.py_object_type:
        return None
    return ret.declaration_code("", for_display=1)
def _fmt_signature(self, cls_name, func_name, args,
                   npoargs=0, npargs=0, pargs=None,
                   nkargs=0, kargs=None,
                   return_expr=None,
                   return_type=None, hide_self=False):
    """Build the full 'Class.func(arg, ...) -> ret' signature string."""
    arg_docs = self._fmt_arglist(args,
                                 npoargs, npargs, pargs,
                                 nkargs, kargs,
                                 hide_self=hide_self)
    doc = '%s(%s)' % (func_name, ', '.join(arg_docs))
    if cls_name:
        doc = '%s.%s' % (cls_name, doc)
    # explicit return annotation wins over the declared return type
    if return_expr:
        ret_doc = self._fmt_expr(return_expr)
    elif return_type:
        ret_doc = self._fmt_ret_type(return_type)
    else:
        ret_doc = None
    if ret_doc:
        doc = '%s -> %s' % (doc, ret_doc)
    return doc
def _embed_signature(self, signature, node_doc):
    """Prefix *node_doc* with *signature*, or use the signature alone."""
    if not node_doc:
        return signature
    return "%s\n%s" % (signature, node_doc)
def __call__(self, node):
    # Skip the whole transform when docstrings are discarded anyway.
    if Options.docstrings:
        return super(EmbedSignature, self).__call__(node)
    return node
def visit_ClassDefNode(self, node):
    """Track the enclosing class name/node while visiting its body."""
    oldname = self.class_name
    oldclass = self.class_node
    self.class_node = node
    try:
        # PyClassDefNode
        self.class_name = node.name
    except AttributeError:
        # CClassDefNode
        self.class_name = node.class_name
    self.visitchildren(node)
    # restore the outer class context on the way out
    self.class_name = oldname
    self.class_node = oldclass
    return node

def visit_LambdaNode(self, node):
    # lambda expressions do not have a signature or inner functions
    return node
def visit_DefNode(self, node):
    """Embed the signature into a def/cpdef function's docstring.

    Special methods are skipped except __init__, whose signature is
    embedded into the *class* docstring (with 'self' hidden).
    """
    if not self.current_directives['embedsignature']:
        return node

    is_constructor = False
    hide_self = False
    if node.entry.is_special:
        is_constructor = self.class_node and node.name == '__init__'
        if not is_constructor:
            return node
        # constructor signature is shown under the class name itself
        class_name, func_name = None, self.class_name
        hide_self = True
    else:
        class_name, func_name = self.class_name, node.name

    # counts may be missing on older node versions, hence getattr()
    npoargs = getattr(node, 'num_posonly_args', 0)
    nkargs = getattr(node, 'num_kwonly_args', 0)
    npargs = len(node.args) - nkargs - npoargs
    signature = self._fmt_signature(
        class_name, func_name, node.args,
        npoargs, npargs, node.star_arg,
        nkargs, node.starstar_arg,
        return_expr=node.return_type_annotation,
        return_type=None, hide_self=hide_self)
    if signature:
        if is_constructor:
            doc_holder = self.class_node.entry.type.scope
        else:
            doc_holder = node.entry

        if doc_holder.doc is not None:
            old_doc = doc_holder.doc
        elif not is_constructor and getattr(node, 'py_func', None) is not None:
            old_doc = node.py_func.entry.doc
        else:
            old_doc = None
        new_doc = self._embed_signature(signature, old_doc)
        doc_holder.doc = EncodedString(new_doc)
        # keep the Python wrapper of a cpdef function in sync
        if not is_constructor and getattr(node, 'py_func', None) is not None:
            node.py_func.entry.doc = EncodedString(new_doc)
    return node
def visit_CFuncDefNode(self, node):
    """Embed the signature into a cpdef function's docstring."""
    if not self.current_directives['embedsignature']:
        return node
    if not node.overridable:  # not cpdef FOO(...):
        return node

    signature = self._fmt_signature(
        self.class_name, node.declarator.base.name,
        node.declarator.args,
        return_type=node.return_type)
    if signature:
        if node.entry.doc is not None:
            old_doc = node.entry.doc
        elif getattr(node, 'py_func', None) is not None:
            old_doc = node.py_func.entry.doc
        else:
            old_doc = None
        new_doc = self._embed_signature(signature, old_doc)
        node.entry.doc = EncodedString(new_doc)
        # keep the Python wrapper in sync
        py_func = getattr(node, 'py_func', None)
        if py_func is not None:
            py_func.entry.doc = EncodedString(new_doc)
    return node
def visit_PropertyNode(self, node):
    """Embed 'name: type' into the docstring of a public-attribute property."""
    if not self.current_directives['embedsignature']:
        return node

    entry = node.entry
    if entry.visibility == 'public':
        # property synthesised from a cdef public attribute
        type_name = entry.type.declaration_code("", for_display=1)
        if not entry.type.is_pyobject:
            # quote C type names so the docstring stays valid annotation-ish text
            type_name = "'%s'" % type_name
        elif entry.type.is_extension_type:
            type_name = entry.type.module_name + '.' + type_name
        signature = '%s: %s' % (entry.name, type_name)
        new_doc = self._embed_signature(signature, entry.doc)
        entry.doc = EncodedString(new_doc)
    return node
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Buffer.py 0000664 0000000 0000000 00000070362 13766135517 0030064 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import
from .Visitor import CythonTransform
from .ModuleNode import ModuleNode
from .Errors import CompileError
from .UtilityCode import CythonUtilityCode
from .Code import UtilityCode, TempitaUtilityCode
from . import Options
from . import Interpreter
from . import PyrexTypes
from . import Naming
from . import Symtab
def dedent(text, reindent=0):
    """Strip common leading whitespace from *text*; when *reindent* > 0,
    re-indent every line by that many spaces afterwards."""
    from textwrap import dedent as _dedent
    stripped = _dedent(text)
    if reindent <= 0:
        return stripped
    pad = " " * reindent
    return '\n'.join(pad + line for line in stripped.split('\n'))
class IntroduceBufferAuxiliaryVars(CythonTransform):
    """For every buffer-typed variable, declare the auxiliary C variables
    needed by the buffer access code."""
    #
    # Entry point
    #
    buffers_exists = False     # set while traversing if any scope has buffer vars
    using_memoryview = False

    def __call__(self, node):
        assert isinstance(node, ModuleNode)
        self.max_ndim = 0
        result = super(IntroduceBufferAuxiliaryVars, self).__call__(node)
        if self.buffers_exists:
            # only emit the buffer helper code when it is actually needed
            use_bufstruct_declare_code(node.scope)
            use_py2_buffer_functions(node.scope)
        return result
#
# Basic operations for transforms
#
def handle_scope(self, node, scope):
    # For all buffers, insert extra variables in the scope.
    # The variables are also accessible from the buffer_info
    # on the buffer entry
    scope_items = scope.entries.items()
    bufvars = [entry for name, entry in scope_items if entry.type.is_buffer]
    if len(bufvars) > 0:
        # sort for deterministic C output
        bufvars.sort(key=lambda entry: entry.name)
        self.buffers_exists = True

    memviewslicevars = [entry for name, entry in scope_items if entry.type.is_memoryviewslice]
    if len(memviewslicevars) > 0:
        self.buffers_exists = True

    for (name, entry) in scope_items:
        if name == 'memoryview' and isinstance(entry.utility_code_definition, CythonUtilityCode):
            self.using_memoryview = True
            break
    del scope_items

    if isinstance(node, ModuleNode) and len(bufvars) > 0:
        # for now...note that pos is wrong
        raise CompileError(node.pos, "Buffer vars not allowed in module scope")
    for entry in bufvars:
        if entry.type.dtype.is_ptr:
            raise CompileError(node.pos, "Buffers with pointer types not yet supported.")

        name = entry.name
        buftype = entry.type
        if buftype.ndim > Options.buffer_max_dims:
            raise CompileError(node.pos,
                               "Buffer ndims exceeds Options.buffer_max_dims = %d" % Options.buffer_max_dims)
        if buftype.ndim > self.max_ndim:
            self.max_ndim = buftype.ndim

        # Declare auxiliary vars
        def decvar(type, prefix):
            cname = scope.mangle(prefix, name)
            aux_var = scope.declare_var(name=None, cname=cname,
                                        type=type, pos=node.pos)
            if entry.is_arg:
                aux_var.used = True  # otherwise, NameNode will mark whether it is used
            return aux_var

        auxvars = ((PyrexTypes.c_pyx_buffer_nd_type, Naming.pybuffernd_prefix),
                   (PyrexTypes.c_pyx_buffer_type, Naming.pybufferstruct_prefix))
        pybuffernd, rcbuffer = [decvar(type, prefix) for (type, prefix) in auxvars]

        entry.buffer_aux = Symtab.BufferAux(pybuffernd, rcbuffer)

    scope.buffer_entries = bufvars
    self.scope = scope
def visit_ModuleNode(self, node):
    # module-level buffer variables live in the module scope
    self.handle_scope(node, node.scope)
    self.visitchildren(node)
    return node

def visit_FuncDefNode(self, node):
    # function-local buffer variables live in the function's local scope
    self.handle_scope(node, node.local_scope)
    self.visitchildren(node)
    return node
#
# Analysis
#
# Recognised buffer options, in positional-argument order (ordered!).
buffer_options = ("dtype", "ndim", "mode", "negative_indices", "cast")
# Defaults applied when an option is not given ('dtype' has no default).
buffer_defaults = {"ndim": 1, "mode": "full", "negative_indices": True, "cast": False}
buffer_positional_options_count = 1  # anything beyond this needs keyword argument

# Error messages raised by analyse_buffer_options().
ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option'
ERR_BUF_TOO_MANY = 'Too many buffer options'
ERR_BUF_DUP = '"%s" buffer option already supplied'
ERR_BUF_MISSING = '"%s" missing'
ERR_BUF_MODE = 'Only allowed buffer modes are: "c", "fortran", "full", "strided" (as a compile-time string)'
ERR_BUF_NDIM = 'ndim must be a non-negative integer'
ERR_BUF_DTYPE = 'dtype must be "object", numeric type or a struct'
ERR_BUF_BOOL = '"%s" must be a boolean'
def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, need_complete=True):
    """
    Must be called during type analysis, as analyse is called
    on the dtype argument.

    posargs and dictargs should consist of a list and a dict
    of tuples (value, pos). Defaults should be a dict of values.

    Returns a dict containing all the options a buffer can have and
    its value (with the positions stripped).

    Raises CompileError on unknown, duplicated or invalid options.
    """
    if defaults is None:
        defaults = buffer_defaults

    posargs, dictargs = Interpreter.interpret_compiletime_options(
        posargs, dictargs, type_env=env, type_args=(0, 'dtype'))

    if len(posargs) > buffer_positional_options_count:
        raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)

    options = {}
    for name, (value, pos) in dictargs.items():
        if name not in buffer_options:
            raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
        options[name] = value

    # zip() pairs each leading positional argument with an option name taken
    # from buffer_options itself, so 'name' is always valid here; the
    # original unknown-option re-check in this loop was unreachable and has
    # been removed.  Only duplicates need checking.
    for name, (value, pos) in zip(buffer_options, posargs):
        if name in options:
            raise CompileError(pos, ERR_BUF_DUP % name)
        options[name] = value

    # Check that they are all there and copy defaults
    for name in buffer_options:
        if name not in options:
            try:
                options[name] = defaults[name]
            except KeyError:
                if need_complete:
                    raise CompileError(globalpos, ERR_BUF_MISSING % name)

    dtype = options.get("dtype")
    if dtype and dtype.is_extension_type:
        raise CompileError(globalpos, ERR_BUF_DTYPE)

    ndim = options.get("ndim")
    if ndim and (not isinstance(ndim, int) or ndim < 0):
        raise CompileError(globalpos, ERR_BUF_NDIM)

    mode = options.get("mode")
    if mode and mode not in ('full', 'strided', 'c', 'fortran'):
        raise CompileError(globalpos, ERR_BUF_MODE)

    def assert_bool(name):
        # boolean-valued options must be real bools, not merely truthy
        x = options.get(name)
        if not isinstance(x, bool):
            raise CompileError(globalpos, ERR_BUF_BOOL % name)

    assert_bool('negative_indices')
    assert_bool('cast')

    return options
#
# Code generation
#
class BufferEntry(object):
    """Wraps a buffer-typed symbol-table entry with the C names and
    per-dimension expressions needed during code generation."""

    def __init__(self, entry):
        self.entry = entry
        self.type = entry.type
        self.cname = entry.buffer_aux.buflocal_nd_var.cname
        self.buf_ptr = "%s.rcbuffer->pybuffer.buf" % self.cname
        self.buf_ptr_type = entry.type.buffer_ptr_type
        self.init_attributes()

    def init_attributes(self):
        # cache the per-dimension C expressions
        self.shape = self.get_buf_shapevars()
        self.strides = self.get_buf_stridevars()
        self.suboffsets = self.get_buf_suboffsetvars()

    def get_buf_suboffsetvars(self):
        return self._for_all_ndim("%s.diminfo[%d].suboffsets")

    def get_buf_stridevars(self):
        return self._for_all_ndim("%s.diminfo[%d].strides")

    def get_buf_shapevars(self):
        return self._for_all_ndim("%s.diminfo[%d].shape")

    def _for_all_ndim(self, s):
        # one formatted expression per buffer dimension
        return [s % (self.cname, i) for i in range(self.type.ndim)]
def generate_buffer_lookup_code(self, code, index_cnames):
    # Create buffer lookup and return it
    # This is done via utility macros/inline functions, which vary
    # according to the access mode used.
    params = []
    nd = self.type.ndim
    mode = self.type.mode
    if mode == 'full':
        # full mode needs stride AND suboffset per index
        for i, s, o in zip(index_cnames,
                           self.get_buf_stridevars(),
                           self.get_buf_suboffsetvars()):
            params.append(i)
            params.append(s)
            params.append(o)
        funcname = "__Pyx_BufPtrFull%dd" % nd
        funcgen = buf_lookup_full_code
    else:
        if mode == 'strided':
            funcname = "__Pyx_BufPtrStrided%dd" % nd
            funcgen = buf_lookup_strided_code
        elif mode == 'c':
            funcname = "__Pyx_BufPtrCContig%dd" % nd
            funcgen = buf_lookup_c_code
        elif mode == 'fortran':
            funcname = "__Pyx_BufPtrFortranContig%dd" % nd
            funcgen = buf_lookup_fortran_code
        else:
            assert False
        # non-full modes only need the stride per index
        for i, s in zip(index_cnames, self.get_buf_stridevars()):
            params.append(i)
            params.append(s)

    # Make sure the utility code is available
    if funcname not in code.globalstate.utility_codes:
        code.globalstate.utility_codes.add(funcname)
        protocode = code.globalstate['utility_code_proto']
        defcode = code.globalstate['utility_code_def']
        funcgen(protocode, defcode, name=funcname, nd=nd)

    buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code()
    ptrcode = "%s(%s, %s, %s)" % (funcname, buf_ptr_type_code, self.buf_ptr,
                                  ", ".join(params))
    return ptrcode
def get_flags(buffer_aux, buffer_type):
    """Return the C expression of PyBUF_* flags matching the buffer's
    access mode and writability."""
    mode_flags = {
        'full': '| PyBUF_INDIRECT',
        'strided': '| PyBUF_STRIDES',
        'c': '| PyBUF_C_CONTIGUOUS',
        'fortran': '| PyBUF_F_CONTIGUOUS',
    }
    mode = buffer_type.mode
    # same failure mode as the original's `assert False` on unknown modes
    assert mode in mode_flags
    flags = 'PyBUF_FORMAT' + mode_flags[mode]
    if buffer_aux.writable_needed:
        flags += "| PyBUF_WRITABLE"
    return flags
def used_buffer_aux_vars(entry):
    """Mark both auxiliary buffer variables of *entry* as used."""
    aux = entry.buffer_aux
    for var in (aux.buflocal_nd_var, aux.rcbuf_var):
        var.used = True
def put_unpack_buffer_aux_into_scope(buf_entry, code):
    """Emit one C line copying the acquired Py_buffer's strides/shape
    (and suboffsets in 'full' mode) into the local diminfo struct."""
    struct_cname = buf_entry.buffer_aux.buflocal_nd_var.cname
    fields = ['strides', 'shape']
    if buf_entry.type.mode == 'full':
        fields.append('suboffsets')

    assignments = [
        "%s.diminfo[%d].%s = %s.rcbuffer->pybuffer.%s[%d];" % (
            struct_cname, dim, field,
            struct_cname, field, dim,
        )
        for dim in range(buf_entry.type.ndim)
        for field in fields
    ]
    code.putln(' '.join(assignments))
def put_init_vars(entry, code):
    """Emit C code putting both buffer helper structs into a safe initial state."""
    aux = entry.buffer_aux
    nd_struct = aux.buflocal_nd_var.cname
    rc_struct = aux.rcbuf_var.cname
    # the refcounted Py_buffer holder: nothing acquired yet
    code.putln("%s.pybuffer.buf = NULL;" % rc_struct)
    code.putln("%s.refcount = 0;" % rc_struct)
    # init the buffer object
    # code.put_init_var_to_py_none(entry)
    # the per-dimension struct points back at the refcounted buffer
    code.putln("%s.data = NULL;" % nd_struct)
    code.putln("%s.rcbuffer = &%s;" % (nd_struct, rc_struct))
def put_acquire_arg_buffer(entry, code, pos):
    """Emit the code block acquiring an argument's buffer on function entry."""
    getbuffer = get_getbuffer_call(code, entry.cname, entry.buffer_aux, entry.type)

    # Acquire any new buffer
    code.putln("{")
    code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
    code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
    code.putln("}")
    # An exception raised in arg parsing cannot be caught, so no
    # need to care about the buffer then.
    put_unpack_buffer_aux_into_scope(entry, code)
def put_release_buffer_code(code, entry):
    """Emit a __Pyx_SafeReleaseBuffer() call for *entry*'s buffer."""
    code.globalstate.use_utility_code(acquire_utility_code)
    nd_cname = entry.buffer_aux.buflocal_nd_var.cname
    code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % nd_cname)
def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
    """Return the C expression calling __Pyx_GetBufferAndValidate for one object."""
    ndim = buffer_type.ndim
    cast = int(buffer_type.cast)
    flags = get_flags(buffer_aux, buffer_type)
    struct_cname = buffer_aux.buflocal_nd_var.cname
    # may emit type-info utility code, so keep it before use_utility_code()
    dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)

    code.globalstate.use_utility_code(acquire_utility_code)
    return ("__Pyx_GetBufferAndValidate(&%s.rcbuffer->pybuffer, "
            "(PyObject*)%s, &%s, %s, %d, %d, __pyx_stack)" % (
                struct_cname, obj_cname, dtype_typeinfo, flags, ndim, cast))
def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
                         is_initialized, pos, code):
    """
    Generate code for reassigning a buffer variable. This only deals with getting
    the buffer auxiliary structure and variables set up correctly, the assignment
    itself and refcounting is the responsibility of the caller.

    However, the assignment operation may throw an exception so that the reassignment
    never happens.

    Depending on the circumstances there are two possible outcomes:
    - Old buffer released, new acquired, rhs assigned to lhs
    - Old buffer released, new acquired which fails, reacquire old lhs buffer
      (which may or may not succeed).
    """
    buffer_aux, buffer_type = buf_entry.buffer_aux, buf_entry.type
    pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
    flags = get_flags(buffer_aux, buffer_type)

    code.putln("{")  # Set up necessary stack for getbuffer
    code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())

    # The call template contains a "%s" placeholder for the object cname,
    # filled in separately for the rhs / lhs cases below.
    getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type)  # fill in object below

    if is_initialized:
        # Release any existing buffer
        code.putln('__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);' % pybuffernd_struct)
        # Acquire
        retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
        code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
        code.putln('if (%s) {' % (code.unlikely("%s < 0" % retcode_cname)))
        # If acquisition failed, attempt to reacquire the old buffer
        # before raising the exception. A failure of reacquisition
        # will cause the reacquisition exception to be reported, one
        # can consider working around this later.
        # The pending exception is stashed with PyErr_Fetch so the
        # reacquisition attempt can run with a clean error state.
        exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
                          for _ in range(3))
        code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
        code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
        code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps)  # Do not refnanny these!
        code.globalstate.use_utility_code(raise_buffer_fallback_code)
        code.putln('__Pyx_RaiseBufferFallbackError();')
        code.putln('} else {')
        code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
        code.putln('}')
        code.putln('%s = %s = %s = 0;' % exc_temps)
        for t in exc_temps:
            code.funcstate.release_temp(t)
        code.putln('}')
        # Unpack indices
        put_unpack_buffer_aux_into_scope(buf_entry, code)
        code.putln(code.error_goto_if_neg(retcode_cname, pos))
        code.funcstate.release_temp(retcode_cname)
    else:
        # Our entry had no previous value, so set to None when acquisition fails.
        # In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
        # so it suffices to set the buf field to NULL.
        code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
        code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.rcbuffer->pybuffer.buf = NULL;' %
                   (lhs_cname,
                    PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
                    pybuffernd_struct))
        code.putln(code.error_goto(pos))
        code.put('} else {')
        # Unpack indices
        put_unpack_buffer_aux_into_scope(buf_entry, code)
        code.putln('}')

    code.putln("}")  # Release stack
def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives,
                           pos, code, negative_indices, in_nogil_context):
    """
    Generates code to process indices and calculate an offset into
    a buffer. Returns a C string which gives a pointer which can be
    read from or written to at will (it is an expression so caller should
    store it in a temporary if it is used more than once).

    As the bounds checking can have any number of combinations of unsigned
    arguments, smart optimizations etc. we insert it directly in the function
    body. The lookup however is delegated to a inline function that is instantiated
    once per ndim (lookup with suboffsets tend to get quite complicated).

    entry is a BufferEntry
    """
    # Negative-index wrapping is only done when the directive allows it
    # AND the caller requested it.
    negative_indices = directives['wraparound'] and negative_indices

    if directives['boundscheck']:
        # Check bounds and fix negative indices.
        # We allocate a temporary which is initialized to -1, meaning OK (!).
        # If an error occurs, the temp is set to the index dimension the
        # error is occurring at.
        failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
        code.putln("%s = -1;" % failed_dim_temp)
        for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())):
            if signed != 0:
                # not unsigned, deal with negative index
                code.putln("if (%s < 0) {" % cname)
                if negative_indices:
                    code.putln("%s += %s;" % (cname, shape))
                    code.putln("if (%s) %s = %d;" % (
                        code.unlikely("%s < 0" % cname),
                        failed_dim_temp, dim))
                else:
                    code.putln("%s = %d;" % (failed_dim_temp, dim))
                code.put("} else ")
            # check bounds in positive direction
            # (the (size_t) cast makes the comparison well-defined for
            # unsigned index variables)
            if signed != 0:
                cast = ""
            else:
                cast = "(size_t)"
            code.putln("if (%s) %s = %d;" % (
                code.unlikely("%s >= %s%s" % (cname, cast, shape)),
                failed_dim_temp, dim))

        if in_nogil_context:
            code.globalstate.use_utility_code(raise_indexerror_nogil)
            func = '__Pyx_RaiseBufferIndexErrorNogil'
        else:
            code.globalstate.use_utility_code(raise_indexerror_code)
            func = '__Pyx_RaiseBufferIndexError'

        code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp))
        code.putln('%s(%s);' % (func, failed_dim_temp))
        code.putln(code.error_goto(pos))
        code.putln('}')
        code.funcstate.release_temp(failed_dim_temp)
    elif negative_indices:
        # Only fix negative indices.
        for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()):
            if signed != 0:
                code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape))

    return entry.generate_buffer_lookup_code(code, index_cnames)
def use_bufstruct_declare_code(env):
    # Pull the buffer bookkeeping struct declarations into the module's
    # generated code.
    env.use_utility_code(buffer_struct_declare_code)
def buf_lookup_full_code(proto, defin, name, nd):
    """
    Generates a buffer lookup function for the right number
    of dimensions. The function gives back a void* at the right location.

    "Full" mode: every dimension may carry a suboffset (indirect
    dimension), so the lookup is delegated to a per-ndim inline C
    function instead of a flat macro.
    """
    # _i_ndex, _s_tride, sub_o_ffset
    macroargs = ", ".join(["i%d, s%d, o%d" % (i, i, i) for i in range(nd)])
    proto.putln("#define %s(type, buf, %s) (type)(%s_imp(buf, %s))" % (name, macroargs, name, macroargs))

    funcargs = ", ".join(["Py_ssize_t i%d, Py_ssize_t s%d, Py_ssize_t o%d" % (i, i, i) for i in range(nd)])
    proto.putln("static CYTHON_INLINE void* %s_imp(void* buf, %s);" % (name, funcargs))
    # Per dimension: advance by stride, then follow the indirection
    # pointer when the suboffset is non-negative.
    defin.putln(dedent("""
        static CYTHON_INLINE void* %s_imp(void* buf, %s) {
          char* ptr = (char*)buf;
        """) % (name, funcargs) + "".join([dedent("""\
          ptr += s%d * i%d;
          if (o%d >= 0) ptr = *((char**)ptr) + o%d;
        """) % (i, i, i, i) for i in range(nd)]
        ) + "\nreturn ptr;\n}")
def buf_lookup_strided_code(proto, defin, name, nd):
    """Generate the lookup macro for a strided (suboffset-free) buffer:
    plain pointer arithmetic summed over all dimensions, yielding a
    typed pointer at the right location."""
    # _i_ndex, _s_tride
    dims = range(nd)
    macro_args = ", ".join("i%d, s%d" % (d, d) for d in dims)
    byte_offset = " + ".join("i%d * s%d" % (d, d) for d in dims)
    proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, macro_args, byte_offset))
def buf_lookup_c_code(proto, defin, name, nd):
    """Generate the lookup macro for C-contiguous buffers.

    Like the strided lookup, except the last dimension has unit stride,
    so its index is added directly to the typed pointer (no
    multiplication). The signature still takes the stride for
    uniformity with the other lookup flavours.
    """
    if nd == 1:
        proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
        return
    macro_args = ", ".join("i%d, s%d" % (d, d) for d in range(nd))
    byte_offset = " + ".join("i%d * s%d" % (d, d) for d in range(nd - 1))
    proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (
        name, macro_args, byte_offset, nd - 1))
def buf_lookup_fortran_code(proto, defin, name, nd):
    """Generate the lookup macro for Fortran-contiguous buffers.

    Mirror image of the C-contiguous lookup: here the FIRST dimension
    has unit stride, so index i0 is added directly to the typed pointer
    while the remaining dimensions use full stride arithmetic.
    """
    if nd == 1:
        proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
        return
    macro_args = ", ".join("i%d, s%d" % (d, d) for d in range(nd))
    byte_offset = " + ".join("i%d * s%d" % (d, d) for d in range(1, nd))
    proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (
        name, macro_args, byte_offset, 0))
def use_py2_buffer_functions(env):
    # Register the Py2 PyObject_GetBuffer/PyBuffer_Release emulation
    # utility code for this module.
    env.use_utility_code(GetAndReleaseBufferUtilityCode())
class GetAndReleaseBufferUtilityCode(object):
    # Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2.
    # For >= 2.6 we do double mode -- use the new buffer interface on objects
    # which has the right tp_flags set, but emulation otherwise.

    requires = None
    is_cython_utility = False

    def __init__(self):
        pass

    # All instances compare equal (and hash equal, via an arbitrary
    # constant) so the utility code is only emitted once per module.
    def __eq__(self, other):
        return isinstance(other, GetAndReleaseBufferUtilityCode)

    def __hash__(self):
        return 24342342

    def get_tree(self, **kwargs): pass

    def put_code(self, output):
        """Write the proto and implementation of the buffer emulation,
        specialized with every extension type in reach that overloads
        __getbuffer__ / __releasebuffer__."""
        code = output['utility_code_def']
        proto_code = output['utility_code_proto']
        env = output.module_node.scope
        cython_scope = env.context.cython_scope

        # Search all types for __getbuffer__ overloads
        types = []
        visited_scopes = set()
        def find_buffer_types(scope):
            # Depth-first walk over this scope and every cimported
            # module scope, visiting each scope at most once.
            if scope in visited_scopes:
                return
            visited_scopes.add(scope)
            for m in scope.cimported_modules:
                find_buffer_types(m)
            for e in scope.type_entries:
                if isinstance(e.utility_code_definition, CythonUtilityCode):
                    continue
                t = e.type
                if t.is_extension_type:
                    if scope is cython_scope and not e.used:
                        continue
                    release = get = None
                    for x in t.scope.pyfunc_entries:
                        if x.name == u"__getbuffer__": get = x.func_cname
                        elif x.name == u"__releasebuffer__": release = x.func_cname
                    if get:
                        # __releasebuffer__ is optional; release may stay None.
                        types.append((t.typeptr_cname, get, release))
        find_buffer_types(env)

        util_code = TempitaUtilityCode.load(
            "GetAndReleaseBuffer", from_file="Buffer.c",
            context=dict(types=types))

        proto = util_code.format_code(util_code.proto)
        impl = util_code.format_code(
            util_code.inject_string_constants(util_code.impl, output)[1])

        proto_code.putln(proto)
        code.putln(impl)
def mangle_dtype_name(dtype):
    """Return the C-identifier fragment used for ``dtype`` in generated
    type-info symbol names.

    Prefixes keep user-defined type names apart from builtin ones
    (consider "typedef float unsigned_int").
    """
    if dtype.is_pyobject:
        return "object"
    if dtype.is_ptr:
        return "ptr"
    prefix = "nn_" if (dtype.is_typedef or dtype.is_struct_or_union) else ""
    return prefix + dtype.specialization_name()
def get_type_information_cname(code, dtype, maxdepth=None):
    """
    Output the run-time type information (__Pyx_TypeInfo) for given dtype,
    and return the name of the type info struct.

    Structs with two floats of the same size are encoded as complex numbers.
    One can separate between complex numbers declared as struct or with native
    encoding by inspecting to see if the fields field of the type is
    filled in.
    """
    namesuffix = mangle_dtype_name(dtype)
    name = "__Pyx_TypeInfo_%s" % namesuffix
    structinfo_name = "__Pyx_StructFields_%s" % namesuffix

    if dtype.is_error: return ""

    # It's critical that walking the type info doesn't use more stack
    # depth than dtype.struct_nesting_depth() returns, so use an assertion for this
    if maxdepth is None: maxdepth = dtype.struct_nesting_depth()
    if maxdepth <= 0:
        assert False

    # Emit each distinct typeinfo struct only once per module.
    if name not in code.globalstate.utility_codes:
        code.globalstate.utility_codes.add(name)
        typecode = code.globalstate['typeinfo']

        # Peel off array dimensions; the element sizes go into the
        # typeinfo's array-size list.
        arraysizes = []
        if dtype.is_array:
            while dtype.is_array:
                arraysizes.append(dtype.size)
                dtype = dtype.base_type

        complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()

        declcode = dtype.empty_declaration_code()
        if dtype.is_simple_buffer_dtype():
            structinfo_name = "NULL"
        elif dtype.is_struct:
            fields = dtype.scope.var_entries
            # Must pre-call all used types in order not to recurse utility code
            # writing.
            assert len(fields) > 0
            types = [get_type_information_cname(code, f.type, maxdepth - 1)
                     for f in fields]
            typecode.putln("static __Pyx_StructField %s[] = {" % structinfo_name, safe=True)
            for f, typeinfo in zip(fields, types):
                typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' %
                               (typeinfo, f.name, dtype.empty_declaration_code(), f.cname), safe=True)
            typecode.putln(' {NULL, NULL, 0}', safe=True)
            typecode.putln("};", safe=True)
        else:
            assert False

        rep = str(dtype)

        flags = "0"
        is_unsigned = "0"
        # Classify the dtype into the single-character type group used
        # by the buffer format checking code.
        if dtype is PyrexTypes.c_char_type:
            is_unsigned = "IS_UNSIGNED(%s)" % declcode
            typegroup = "'H'"
        elif dtype.is_int:
            is_unsigned = "IS_UNSIGNED(%s)" % declcode
            typegroup = "%s ? 'U' : 'I'" % is_unsigned
        elif complex_possible or dtype.is_complex:
            typegroup = "'C'"
        elif dtype.is_float:
            typegroup = "'R'"
        elif dtype.is_struct:
            typegroup = "'S'"
            if dtype.packed:
                flags = "__PYX_BUF_FLAGS_PACKED_STRUCT"
        elif dtype.is_pyobject:
            typegroup = "'O'"
        else:
            assert False, dtype

        typeinfo = ('static __Pyx_TypeInfo %s = '
                    '{ "%s", %s, sizeof(%s), { %s }, %s, %s, %s, %s };')
        tup = (name, rep, structinfo_name, declcode,
               ', '.join([str(x) for x in arraysizes]) or '0', len(arraysizes),
               typegroup, is_unsigned, flags)
        typecode.putln(typeinfo % tup, safe=True)

    return name
def load_buffer_utility(util_code_name, context=None, **kwargs):
    """Load a named utility-code snippet from Buffer.c, using the
    Tempita-templated variant when a template context is supplied."""
    if context is None:
        return UtilityCode.load(util_code_name, "Buffer.c", **kwargs)
    return TempitaUtilityCode.load(util_code_name, "Buffer.c", context=context, **kwargs)
# Preloaded utility-code snippets shared by the buffer code generators above.
context = dict(max_dims=Options.buffer_max_dims)
buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context)
buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs")

# Utility function to set the right exception
# The caller should immediately goto_error
raise_indexerror_code = load_buffer_utility("BufferIndexError")
raise_indexerror_nogil = load_buffer_utility("BufferIndexErrorNogil")
raise_buffer_fallback_code = load_buffer_utility("BufferFallbackError")
acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context)
buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context)

# See utility code BufferFormatFromTypeInfo
_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat")
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Builtin.py 0000664 0000000 0000000 00000110245 13766135517 0030254 0 ustar 00root root 0000000 0000000 #
# Builtin Definitions
#
from __future__ import absolute_import
from .Symtab import BuiltinScope, StructOrUnionScope, CppClassScope
from .Code import UtilityCode
from .TypeSlots import Signature
from . import PyrexTypes
from . import Options
# C-level implementations of builtin types, functions and methods
iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c")
getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c")
getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c")
pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c")
pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
globals_utility_code = UtilityCode.load("Globals", "Builtins.c")
builtin_utility_code = {
'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"),
}
# mapping from builtins to their C-level equivalents
class _BuiltinOverride(object):
def __init__(self, py_name, args, ret_type, cname, py_equiv="*",
utility_code=None, sig=None, func_type=None,
is_strict_signature=False, builtin_return_type=None,
nogil=None):
self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv
self.args, self.ret_type = args, ret_type
self.func_type, self.sig = func_type, sig
self.builtin_return_type = builtin_return_type
self.is_strict_signature = is_strict_signature
self.utility_code = utility_code
self.nogil = nogil
def build_func_type(self, sig=None, self_arg=None):
if sig is None:
sig = Signature(self.args, self.ret_type, nogil=self.nogil)
sig.exception_check = False # not needed for the current builtins
func_type = sig.function_type(self_arg)
if self.is_strict_signature:
func_type.is_strict_signature = True
if self.builtin_return_type:
func_type.return_type = builtin_types[self.builtin_return_type]
return func_type
class BuiltinAttribute(object):
    """An attribute of a builtin type, exposed as a C struct field."""

    def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
        self.py_name = py_name
        self.cname = cname or py_name
        # field_type_name is resolved lazily: can't do the lookup
        # before the type is declared!
        self.field_type_name = field_type_name
        self.field_type = field_type

    def declare_in_type(self, self_type):
        """Declare this attribute on self_type's scope as a private
        C-level variable."""
        if self.field_type_name is None:
            field_type = self.field_type or PyrexTypes.py_object_type
        else:
            # lazy type lookup
            field_type = builtin_scope.lookup(self.field_type_name).type
        entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
        entry.is_variable = True
class BuiltinFunction(_BuiltinOverride):
    """A builtin function mapped onto a C function in a module scope."""

    def declare_in_scope(self, scope):
        """Register this builtin's C implementation in ``scope``."""
        func_type = self.func_type
        if func_type is None:
            func_type = self.build_func_type(self.sig)
        scope.declare_builtin_cfunction(self.py_name, func_type, self.cname,
                                        self.py_equiv, self.utility_code)
class BuiltinMethod(_BuiltinOverride):
    """A builtin method mapped onto a C function whose first argument
    is the object itself."""

    def declare_in_type(self, self_type):
        """Register this method's C implementation on self_type's scope."""
        method_type = self.func_type
        if method_type is None:
            # override 'self' type (first argument)
            self_arg = PyrexTypes.CFuncTypeArg("", self_type, None)
            self_arg.not_none = True
            self_arg.accept_builtin_subtypes = True
            method_type = self.build_func_type(self.sig, self_arg)
        self_type.scope.declare_builtin_cfunction(
            self.py_name, method_type, self.cname, utility_code=self.utility_code)
# Table of builtin functions and their C-level implementations.
# Several builtins (abs, exec, getattr, iter, next, ord, pow) appear
# multiple times: overloads are tried in order by argument signature.
builtin_function_table = [
    # name, args, return, C API func, py equiv = "*"
    BuiltinFunction('abs', "d", "d", "fabs",
                    is_strict_signature=True, nogil=True),
    BuiltinFunction('abs', "f", "f", "fabsf",
                    is_strict_signature=True, nogil=True),
    BuiltinFunction('abs', "i", "i", "abs",
                    is_strict_signature=True, nogil=True),
    BuiltinFunction('abs', "l", "l", "labs",
                    is_strict_signature=True, nogil=True),
    BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
                    utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
                    func_type = PyrexTypes.CFuncType(
                        PyrexTypes.c_longlong_type, [
                            PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
                            ],
                        is_strict_signature = True, nogil=True)),
    ] + list(
        # abs() for the unsigned integer types is a no-op spelled as a comment.
        BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()),
                        func_type = PyrexTypes.CFuncType(
                            t,
                            [PyrexTypes.CFuncTypeArg("arg", t, None)],
                            is_strict_signature = True, nogil=True))
        for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type)
    ) + list(
        BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix),
                        func_type = PyrexTypes.CFuncType(
                            t.real_type, [
                                PyrexTypes.CFuncTypeArg("arg", t, None)
                                ],
                            is_strict_signature = True, nogil=True))
        for t in (PyrexTypes.c_float_complex_type,
                  PyrexTypes.c_double_complex_type,
                  PyrexTypes.c_longdouble_complex_type)
    ) + [
    BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute",
                    utility_code=UtilityCode.load("py_abs", "Builtins.c")),
    #('all',       "",     "",      ""),
    #('any',       "",     "",      ""),
    #('ascii',     "",     "",      ""),
    #('bin',       "",     "",      ""),
    BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check",
                    utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")),
    #('chr',       "",     "",      ""),
    #('cmp', "",   "",     "",      ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result)
    #('compile',   "",     "",      ""), # PyObject* Py_CompileString( char *str, char *filename, int start)
    BuiltinFunction('delattr', "OO", "r", "PyObject_DelAttr"),
    BuiltinFunction('dir', "O", "O", "PyObject_Dir"),
    BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"),
    BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals",
                    utility_code = pyexec_globals_utility_code),
    BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2",
                    utility_code = pyexec_utility_code),
    BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3",
                    utility_code = pyexec_utility_code),
    #('eval',      "",     "",      ""),
    #('execfile',  "",     "",      ""),
    #('filter',    "",     "",      ""),
    BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr",
                    utility_code=getattr3_utility_code),  # Pyrex legacy
    BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3",
                    utility_code=getattr3_utility_code),
    BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr",
                    utility_code=getattr_utility_code),
    BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr",
                    utility_code = UtilityCode.load("HasAttr", "Builtins.c")),
    BuiltinFunction('hash', "O", "h", "PyObject_Hash"),
    #('hex',       "",     "",      ""),
    #('id',        "",     "",      ""),
    #('input',     "",     "",      ""),
    BuiltinFunction('intern', "O", "O", "__Pyx_Intern",
                    utility_code = UtilityCode.load("Intern", "Builtins.c")),
    BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"),
    BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"),
    BuiltinFunction('iter', "OO", "O", "PyCallIter_New"),
    BuiltinFunction('iter', "O", "O", "PyObject_GetIter"),
    BuiltinFunction('len', "O", "z", "PyObject_Length"),
    BuiltinFunction('locals', "", "O", "__pyx_locals"),
    #('map',       "",     "",      ""),
    #('max',       "",     "",      ""),
    #('min',       "",     "",      ""),
    BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next",
                    utility_code = iter_next_utility_code),  # not available in Py2 => implemented here
    BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2",
                    utility_code = iter_next_utility_code),  # not available in Py2 => implemented here
    #('oct',       "",     "",      ""),
    #('open',       "ss",   "O",     "PyFile_FromString"),   # not in Py3
] + [
    BuiltinFunction('ord', None, None, "__Pyx_long_cast",
                    func_type=PyrexTypes.CFuncType(
                        PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
                        is_strict_signature=True))
    for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type]
] + [
    BuiltinFunction('ord', None, None, "__Pyx_uchar_cast",
                    func_type=PyrexTypes.CFuncType(
                        PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
                        is_strict_signature=True))
    for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type]
] + [
    BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord",
                    utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"),
                    func_type=PyrexTypes.CFuncType(
                        PyrexTypes.c_long_type, [
                            PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None)
                        ],
                        exception_value="(long)(Py_UCS4)-1")),
    BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"),
    BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2",
                    utility_code = UtilityCode.load("pow2", "Builtins.c")),
    #('range',     "",     "",      ""),
    #('raw_input', "",     "",      ""),
    #('reduce',    "",     "",      ""),
    BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"),
    BuiltinFunction('repr', "O", "O", "PyObject_Repr"),  # , builtin_return_type='str'),  # add in Cython 3.1
    #('round',     "",     "",      ""),
    BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"),
    #('sum',       "",     "",      ""),
    #('sorted',    "",     "",      ""),
    #('type',       "O",    "O",     "PyObject_Type"),
    BuiltinFunction('unichr', "l", "O", "PyUnicode_FromOrdinal", builtin_return_type='unicode'),
    #('unicode',   "",     "",      ""),
    #('vars',      "",     "",      ""),
    #('zip',       "",     "",      ""),
    #  Can't do these easily until we have builtin type entries.
    #('typecheck',  "OO",   "i",     "PyObject_TypeCheck", False),
    #('issubtype',  "OO",   "i",     "PyType_IsSubtype",   False),

    # Put in namespace append optimization.
    BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"),

    # This is conditionally looked up based on a compiler directive.
    BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals",
                    utility_code=globals_utility_code),
]
# Builtin types
# bool
# buffer
# classmethod
# dict
# enumerate
# file
# float
# int
# list
# long
# object
# property
# slice
# staticmethod
# super
# str
# tuple
# type
# xrange
# Table of builtin types: (python name, C type object name, list of
# BuiltinAttribute / BuiltinMethod entries declared on the type).
builtin_types_table = [
    ("type",    "PyType_Type",     []),

    # This conflicts with the C++ bool type, and unfortunately
    # C++ is too liberal about PyObject* <-> bool conversions,
    # resulting in unintuitive runtime behavior and segfaults.
    # ("bool",   "PyBool_Type",     []),

    ("int",     "PyInt_Type",      []),
    ("long",    "PyLong_Type",     []),
    ("float",   "PyFloat_Type",    []),

    ("complex", "PyComplex_Type",  [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
                                    BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
                                    BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
                                    ]),

    ("basestring", "PyBaseString_Type", [
                                    BuiltinMethod("join",  "TO",   "T", "__Pyx_PyBaseString_Join",
                                                  utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
                                    ]),
    ("bytearray", "PyByteArray_Type", [
                                    ]),
    ("bytes",   "PyBytes_Type",    [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("join",  "TO",   "O", "__Pyx_PyBytes_Join",
                                                  utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
                                    ]),
    ("str",     "PyString_Type",   [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("join",  "TO",   "O", "__Pyx_PyString_Join",
                                                  builtin_return_type='basestring',
                                                  utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
                                    ]),
    ("unicode", "PyUnicode_Type",  [BuiltinMethod("__contains__",  "TO",   "b", "PyUnicode_Contains"),
                                    BuiltinMethod("join",  "TO",   "T", "PyUnicode_Join"),
                                    ]),

    ("tuple",   "PyTuple_Type",    [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    ]),

    ("list",    "PyList_Type",     [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("insert",  "TzO",  "r", "PyList_Insert"),
                                    BuiltinMethod("reverse", "T",    "r", "PyList_Reverse"),
                                    BuiltinMethod("append",  "TO",   "r", "__Pyx_PyList_Append",
                                                  utility_code=UtilityCode.load("ListAppend", "Optimize.c")),
                                    BuiltinMethod("extend",  "TO",   "r", "__Pyx_PyList_Extend",
                                                  utility_code=UtilityCode.load("ListExtend", "Optimize.c")),
                                    ]),

    ("dict",    "PyDict_Type",     [BuiltinMethod("__contains__",  "TO",   "b", "PyDict_Contains"),
                                    BuiltinMethod("has_key",       "TO",   "b", "PyDict_Contains"),
                                    BuiltinMethod("items",  "T",   "O", "__Pyx_PyDict_Items",
                                                  utility_code=UtilityCode.load("py_dict_items", "Builtins.c")),
                                    BuiltinMethod("keys",   "T",   "O", "__Pyx_PyDict_Keys",
                                                  utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")),
                                    BuiltinMethod("values", "T",   "O", "__Pyx_PyDict_Values",
                                                  utility_code=UtilityCode.load("py_dict_values", "Builtins.c")),
                                    BuiltinMethod("iteritems",  "T",   "O", "__Pyx_PyDict_IterItems",
                                                  utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")),
                                    BuiltinMethod("iterkeys",   "T",   "O", "__Pyx_PyDict_IterKeys",
                                                  utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")),
                                    BuiltinMethod("itervalues", "T",   "O", "__Pyx_PyDict_IterValues",
                                                  utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")),
                                    BuiltinMethod("viewitems",  "T",   "O", "__Pyx_PyDict_ViewItems",
                                                  utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")),
                                    BuiltinMethod("viewkeys",   "T",   "O", "__Pyx_PyDict_ViewKeys",
                                                  utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")),
                                    BuiltinMethod("viewvalues", "T",   "O", "__Pyx_PyDict_ViewValues",
                                                  utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")),
                                    BuiltinMethod("clear",  "T",   "r", "__Pyx_PyDict_Clear",
                                                  utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")),
                                    BuiltinMethod("copy",   "T",   "T", "PyDict_Copy")]),

    ("slice",   "PySlice_Type",    [BuiltinAttribute('start'),
                                    BuiltinAttribute('stop'),
                                    BuiltinAttribute('step'),
                                    ]),
    # ("file",    "PyFile_Type",     []),  # not in Py3

    ("set",     "PySet_Type",      [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("clear",   "T",  "r", "PySet_Clear"),
                                    # discard() and remove() have a special treatment for unhashable values
                                    BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
                                                  utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
                                    BuiltinMethod("remove",  "TO", "r", "__Pyx_PySet_Remove",
                                                  utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
                                    # update is actually variadic (see Github issue #1645)
                                    # BuiltinMethod("update",     "TO", "r", "__Pyx_PySet_Update",
                                    #               utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
                                    BuiltinMethod("add",     "TO", "r", "PySet_Add"),
                                    BuiltinMethod("pop",     "T",  "O", "PySet_Pop")]),
    ("frozenset", "PyFrozenSet_Type", []),
    ("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []),
    ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []),
]
# some builtin types do not always return an instance of
# themselves - these do:
# (idiom fix: use a set literal instead of set([...]))
types_that_construct_their_instance = {
    'type', 'bool', 'long', 'float', 'complex',
    'bytes', 'unicode', 'bytearray',
    'tuple', 'list', 'dict', 'set', 'frozenset',
    # 'str',             # only in Py3.x
    # 'file',            # only in Py2.x
}
# C structs exposed as builtin types: (python name, C name, field list).
builtin_structs_table = [
    ('Py_buffer', 'Py_buffer',
     [("buf",        PyrexTypes.c_void_ptr_type),
      ("obj",        PyrexTypes.py_object_type),
      ("len",        PyrexTypes.c_py_ssize_t_type),
      ("itemsize",   PyrexTypes.c_py_ssize_t_type),
      ("readonly",   PyrexTypes.c_bint_type),
      ("ndim",       PyrexTypes.c_int_type),
      ("format",     PyrexTypes.c_char_ptr_type),
      ("shape",      PyrexTypes.c_py_ssize_t_ptr_type),
      ("strides",    PyrexTypes.c_py_ssize_t_ptr_type),
      ("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type),
      ("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)),
      ("internal",   PyrexTypes.c_void_ptr_type),
      ]),
    ('Py_complex', 'Py_complex',
     [('real', PyrexTypes.c_double_type),
      ('imag', PyrexTypes.c_double_type),
      ])
]
# inject cyobject
def inject_cy_object(self):
    """Declare the CyObject base type (with its C++ class scope) in this
    builtin scope and publish it in the module-global ``cy_object_type``."""
    global cy_object_type
    scope = CppClassScope("CyObject", self, None)
    # Mark the scope as a cypclass-style C++ class scope.
    scope.is_cpp_class_scope = 1
    scope.is_cyp_class_scope = 1
    scope.inherited_var_entries = []
    scope.inherited_type_entries = []
    cy_object_type = PyrexTypes.cy_object_type
    scope.type = cy_object_type
    cy_object_type.set_scope(scope)
    entry = self.declare("CyObject", "CyObject", cy_object_type, None, "extern")
    entry.is_type = 1
# inject acthon interfaces
def inject_acthon_interfaces(self):
    """Declare the Acthon actor-model interface cypclasses in scope *self*
    (the builtin scope): result, message, sync, queue and the activable
    base class. The types are published as module-level globals so other
    compiler stages can reference them directly.

    Fixes two argument-order defects in the original ``declare`` calls:
    the queue entry passed the scope itself as ``pos`` and the activable
    entry had its ``cname`` and ``pos`` arguments swapped.
    """
    global acthon_result_type, acthon_message_type, acthon_sync_type, acthon_queue_type, acthon_activable_type
    def init_scope(scope):
        # Mark the scope as a cypclass scope and reset inheritance bookkeeping.
        scope.is_cpp_class_scope = 1
        scope.is_cyp_class_scope = 1
        scope.inherited_var_entries = []
        scope.inherited_type_entries = []
    # cypclass ActhonResultInterface(CyObject):
    #     void pushVoidStarResult(void* result){}
    #     void* getVoidStarResult(){}
    #     void pushIntResult(int result){}
    #     int getIntResult(){}
    #     operator int() { return this->getIntResult(); }
    result_scope = CppClassScope("ActhonResultInterface", self, None)
    init_scope(result_scope)
    acthon_result_type = result_type = PyrexTypes.CypClassType(
        "ActhonResultInterface", result_scope, "ActhonResultInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    result_scope.type = result_type
    # result_type.set_scope is required because parent_type is used when doing scope inheritance
    result_type.set_scope(result_scope)
    result_entry = self.declare("ActhonResultInterface", "ActhonResultInterface", result_type, None, "extern")
    result_entry.is_type = 1
    result_pushVoidStar_arg_type = PyrexTypes.CFuncTypeArg("result", PyrexTypes.c_void_ptr_type, None)
    result_pushVoidStar_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [result_pushVoidStar_arg_type], nogil = 1)
    result_pushVoidStar_entry = result_scope.declare("pushVoidStarResult", "pushVoidStarResult",
                                                     result_pushVoidStar_type, None, "extern")
    result_pushVoidStar_entry.is_cfunction = 1
    result_pushVoidStar_entry.is_variable = 1
    result_scope.var_entries.append(result_pushVoidStar_entry)
    result_getVoidStar_type = PyrexTypes.CFuncType(PyrexTypes.c_void_ptr_type, [], nogil = 1)
    result_getVoidStar_type.is_const_method = 1
    result_getVoidStar_entry = result_scope.declare("getVoidStarResult", "getVoidStarResult",
                                                    result_getVoidStar_type, None, "extern")
    result_getVoidStar_entry.is_cfunction = 1
    result_getVoidStar_entry.is_variable = 1
    result_scope.var_entries.append(result_getVoidStar_entry)
    result_pushInt_arg_type = PyrexTypes.CFuncTypeArg("result", PyrexTypes.c_int_type, None)
    result_pushInt_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [result_pushInt_arg_type], nogil = 1)
    result_pushInt_entry = result_scope.declare("pushIntResult", "pushIntResult",
                                                result_pushInt_type, None, "extern")
    result_pushInt_entry.is_cfunction = 1
    result_pushInt_entry.is_variable = 1
    result_scope.var_entries.append(result_pushInt_entry)
    result_getInt_type = PyrexTypes.CFuncType(PyrexTypes.c_int_type, [], nogil = 1)
    result_getInt_type.is_const_method = 1
    result_getInt_entry = result_scope.declare("getIntResult", "getIntResult",
                                               result_getInt_type, None, "extern")
    result_getInt_entry.is_cfunction = 1
    result_getInt_entry.is_variable = 1
    result_scope.var_entries.append(result_getInt_entry)
    # Conversion operators so a result can be used directly as int / void*.
    result_int_typecast_type = PyrexTypes.CFuncType(PyrexTypes.c_int_type, [], nogil = 1)
    result_int_typecast_entry = result_scope.declare("operator int", "operator int",
                                                     result_int_typecast_type, None, "extern")
    result_int_typecast_entry.is_cfunction = 1
    result_int_typecast_entry.is_variable = 1
    result_scope.var_entries.append(result_int_typecast_entry)
    result_voidStar_typecast_type = PyrexTypes.CFuncType(PyrexTypes.c_void_ptr_type, [], nogil = 1)
    result_voidStar_typecast_entry = result_scope.declare("operator void *", "operator void *",
                                                          result_voidStar_typecast_type, None, "extern")
    result_voidStar_typecast_entry.is_cfunction = 1
    result_voidStar_typecast_entry.is_variable = 1
    result_scope.var_entries.append(result_voidStar_typecast_entry)
    # cypclass ActhonMessageInterface
    # The message type must exist before the sync interface because the
    # sync methods take message arguments; its own members follow below.
    message_scope = CppClassScope("ActhonMessageInterface", self, None)
    init_scope(message_scope)
    acthon_message_type = message_type = PyrexTypes.CypClassType(
        "ActhonMessageInterface", message_scope, "ActhonMessageInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    message_type.set_scope(message_scope)
    message_scope.type = message_type
    # cypclass ActhonSyncInterface(CyObject):
    #     bool isActivable(){}
    #     bool isCompleted(){}
    #     void insertActivity(ActhonMessageInterface msg){}
    #     void removeActivity(ActhonMessageInterface msg){}
    sync_scope = CppClassScope("ActhonSyncInterface", self, None)
    init_scope(sync_scope)
    acthon_sync_type = sync_type = PyrexTypes.CypClassType(
        "ActhonSyncInterface", sync_scope, "ActhonSyncInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    sync_type.set_scope(sync_scope)
    sync_scope.type = sync_type
    sync_entry = self.declare("ActhonSyncInterface", "ActhonSyncInterface", sync_type, None, "extern")
    sync_entry.is_type = 1
    sync_isActivable_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    sync_isActivable_type.is_const_method = 1
    sync_isActivable_entry = sync_scope.declare("isActivable", "isActivable",
                                                sync_isActivable_type, None, "extern")
    sync_isActivable_entry.is_cfunction = 1
    sync_isActivable_entry.is_variable = 1
    sync_scope.var_entries.append(sync_isActivable_entry)
    sync_isCompleted_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    sync_isCompleted_type.is_const_method = 1
    sync_isCompleted_entry = sync_scope.declare("isCompleted", "isCompleted",
                                                sync_isCompleted_type, None, "extern")
    sync_isCompleted_entry.is_cfunction = 1
    sync_isCompleted_entry.is_variable = 1
    sync_scope.var_entries.append(sync_isCompleted_entry)
    sync_msg_arg = PyrexTypes.CFuncTypeArg("msg", message_type, None)
    sync_insertActivity_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [sync_msg_arg], nogil = 1)
    sync_removeActivity_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [sync_msg_arg], nogil = 1)
    sync_insertActivity_entry = sync_scope.declare("insertActivity", "insertActivity",
                                                   sync_insertActivity_type, None, "extern")
    sync_insertActivity_entry.is_cfunction = 1
    sync_insertActivity_entry.is_variable = 1
    sync_scope.var_entries.append(sync_insertActivity_entry)
    sync_removeActivity_entry = sync_scope.declare("removeActivity", "removeActivity",
                                                   sync_removeActivity_type, None, "extern")
    sync_removeActivity_entry.is_cfunction = 1
    sync_removeActivity_entry.is_variable = 1
    sync_scope.var_entries.append(sync_removeActivity_entry)
    # cypclass ActhonMessageInterface(CyObject):
    #     ActhonSyncInterface _sync_method
    #     ActhonResultInterface _result
    #     bool activate(){}
    message_entry = self.declare("ActhonMessageInterface", "ActhonMessageInterface", message_type, None, "extern")
    message_entry.is_type = 1
    message_sync_attr_entry = message_scope.declare("_sync_method", "_sync_method",
                                                    sync_type, None, "extern")
    message_sync_attr_entry.is_variable = 1
    message_scope.var_entries.append(message_sync_attr_entry)
    message_result_attr_entry = message_scope.declare("_result", "_result",
                                                      result_type, None, "extern")
    message_result_attr_entry.is_variable = 1
    message_scope.var_entries.append(message_result_attr_entry)
    message_activate_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    message_activate_entry = message_scope.declare("activate", "activate",
                                                   message_activate_type, None, "extern")
    message_activate_entry.is_cfunction = 1
    message_activate_entry.is_variable = 1
    message_scope.var_entries.append(message_activate_entry)
    # cypclass ActhonQueueInterface(CyObject):
    #     void push(ActhonMessageInterface message){}
    #     bool activate(){}
    queue_scope = CppClassScope("ActhonQueueInterface", self, None)
    init_scope(queue_scope)
    acthon_queue_type = queue_type = PyrexTypes.CypClassType(
        "ActhonQueueInterface", queue_scope, "ActhonQueueInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    queue_type.set_scope(queue_scope)
    queue_scope.type = queue_type
    # FIX: pass None as 'pos' (the scope object itself was passed before),
    # matching every other interface declaration in this function.
    queue_entry = self.declare("ActhonQueueInterface", "ActhonQueueInterface", queue_type, None, "extern")
    queue_entry.is_type = 1
    queue_msg_arg = PyrexTypes.CFuncTypeArg("msg", message_type, None)
    queue_push_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [queue_msg_arg], nogil = 1)
    queue_push_entry = queue_scope.declare("push", "push", queue_push_type,
                                           None, "extern")
    queue_push_entry.is_cfunction = 1
    queue_push_entry.is_variable = 1
    queue_scope.var_entries.append(queue_push_entry)
    queue_activate_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    queue_activate_entry = queue_scope.declare("activate", "activate",
                                               queue_activate_type, None, "extern")
    queue_activate_entry.is_cfunction = 1
    queue_activate_entry.is_variable = 1
    queue_scope.var_entries.append(queue_activate_entry)
    queue_is_empty_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    queue_is_empty_type.is_const_method = 1
    queue_is_empty_entry = queue_scope.declare("is_empty", "is_empty",
                                               queue_is_empty_type, None, "extern")
    queue_is_empty_entry.is_cfunction = 1
    queue_is_empty_entry.is_variable = 1
    queue_scope.var_entries.append(queue_is_empty_entry)
    # cdef cypclass ActivableClass:
    #     ResultInterface (*_active_result_class)()
    #     QueueInterface _active_queue_class
    activable_scope = CppClassScope("ActhonActivableClass", self, None)
    init_scope(activable_scope)
    acthon_activable_type = activable_type = PyrexTypes.CypClassType(
        "ActhonActivableClass", activable_scope, "ActhonActivableClass", (PyrexTypes.cy_object_type,),
        activable=False)
    activable_type.set_scope(activable_scope)
    # Set scope.type as well, for consistency with the other interfaces above.
    activable_scope.type = activable_type
    # FIX: the 'cname' and 'pos' arguments were swapped in the original call
    # (declare(name, cname, type, pos, visibility)).
    activable_entry = self.declare("ActhonActivableClass", "ActhonActivableClass", activable_type, None, "extern")
    activable_entry.is_type = 1
    activable_result_attr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(result_entry.type, []))
    activable_result_attr_entry = activable_scope.declare("_active_result_class", "_active_result_class",
                                                          activable_result_attr_type, None, "extern")
    activable_result_attr_entry.is_variable = 1
    activable_scope.var_entries.append(activable_result_attr_entry)
    activable_queue_attr_entry = activable_scope.declare("_active_queue_class", "_active_queue_class",
                                                         PyrexTypes.cyp_class_qualified_type(queue_type, 'locked'), None, "extern")
    activable_queue_attr_entry.is_variable = 1
    activable_scope.var_entries.append(activable_queue_attr_entry)
# set up builtin scope
builtin_scope = BuiltinScope()
def init_builtin_funcs():
    """Declare every function from builtin_function_table in the builtin scope."""
    for builtin_function in builtin_function_table:
        builtin_function.declare_in_scope(builtin_scope)
builtin_types = {}
def init_builtin_types():
    """Declare all builtin Python types from builtin_types_table and
    register them in the module-level builtin_types mapping."""
    global builtin_types
    # Types whose C object struct does not follow the Py<Name>Object
    # naming pattern ('bool' has no usable object struct at all).
    irregular_structs = {
        'frozenset': 'PySetObject',
        'bytearray': 'PyByteArrayObject',
        'bool': None,
        'Exception': 'PyBaseExceptionObject',
        'StopAsyncIteration': 'PyBaseExceptionObject',
    }
    for name, cname, methods in builtin_types_table:
        utility = builtin_utility_code.get(name)
        if name in irregular_structs:
            objstruct_cname = irregular_structs[name]
        else:
            objstruct_cname = 'Py%sObject' % name.capitalize()
        the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname)
        builtin_types[name] = the_type
        for method in methods:
            method.declare_in_type(the_type)
def init_builtin_structs():
    """Declare the builtin C structs (e.g. Py_buffer, Py_complex)."""
    for name, cname, attribute_types in builtin_structs_table:
        struct_scope = StructOrUnionScope(name)
        for attr_name, attr_type in attribute_types:
            struct_scope.declare_var(attr_name, attr_type, None,
                                     attr_name, allow_pyobject=True)
        builtin_scope.declare_struct_or_union(
            name, "struct", struct_scope, 1, None, cname=cname)
def inject_cypclass_refcount_macros():
    """Expose the cypclass reference-counting macros as builtin C functions."""
    incref_type = PyrexTypes.CFuncType(
        PyrexTypes.c_void_type,
        [PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None)],
        nogil=1)
    # The decref macros take the object by reference.
    decref_type = PyrexTypes.CFuncType(
        PyrexTypes.c_void_type,
        [PyrexTypes.CFuncTypeArg("obj", PyrexTypes.CReferenceType(PyrexTypes.const_cy_object_type), None)],
        nogil=1)
    getref_type = PyrexTypes.CFuncType(
        PyrexTypes.c_int_type,
        [PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None)],
        nogil=1)
    macro_signatures = [
        ("Cy_INCREF", incref_type),
        ("Cy_DECREF", decref_type),
        ("Cy_XDECREF", decref_type),
        ("Cy_GETREF", getref_type),
    ]
    for macro, macro_type in macro_signatures:
        builtin_scope.declare_builtin_cfunction(macro, macro_type, macro)
def inject_cypclass_lock_macros():
    """Expose the cypclass locking macros as builtin C functions."""
    def lock_signature(return_type):
        # All lock macros take a single const CyObject argument.
        return PyrexTypes.CFuncType(
            return_type,
            [PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None)],
            nogil=1)
    # Blocking lock/unlock macros return nothing.
    blocking_macro_type = lock_signature(PyrexTypes.c_void_type)
    for macro in ("Cy_RLOCK", "Cy_WLOCK", "Cy_UNWLOCK", "Cy_UNRLOCK"):
        builtin_scope.declare_builtin_cfunction(macro, blocking_macro_type, macro)
    # Non-blocking variants report success via an int return value.
    nonblocking_macro_type = lock_signature(PyrexTypes.c_int_type)
    for macro in ("Cy_TRYRLOCK", "Cy_TRYWLOCK"):
        builtin_scope.declare_builtin_cfunction(macro, nonblocking_macro_type, macro)
def inject_cypclass_typecheck_functions():
    """Declare the templated isinstanceof() builtin used for cypclass checks."""
    placeholder = PyrexTypes.TemplatePlaceholderType("T")
    isinstanceof_signature = PyrexTypes.CFuncType(
        PyrexTypes.c_int_type,
        [
            PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None),
            PyrexTypes.CFuncTypeArg("type", placeholder, None),
        ],
        nogil=1,
        templates=[placeholder],
    )
    builtin_scope.declare_builtin_cfunction("isinstanceof", isinstanceof_signature, "isinstanceof")
def init_builtins():
    # Populate the builtin scope: structs and types first, then functions,
    # then the cypclass extensions. Order matters: types must exist before
    # they are looked up below.
    init_builtin_structs()
    init_builtin_types()
    init_builtin_funcs()
    # __debug__ maps onto CPython's Py_OptimizeFlag at the C level.
    builtin_scope.declare_var(
        '__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type),
        pos=None, cname='(!Py_OptimizeFlag)', is_cdef=True)
    global list_type, tuple_type, dict_type, set_type, frozenset_type
    global bytes_type, str_type, unicode_type, basestring_type, slice_type
    global float_type, bool_type, type_type, complex_type, bytearray_type
    # Cache frequently used builtin types as module-level globals.
    type_type = builtin_scope.lookup('type').type
    list_type = builtin_scope.lookup('list').type
    tuple_type = builtin_scope.lookup('tuple').type
    dict_type = builtin_scope.lookup('dict').type
    set_type = builtin_scope.lookup('set').type
    frozenset_type = builtin_scope.lookup('frozenset').type
    slice_type = builtin_scope.lookup('slice').type
    bytes_type = builtin_scope.lookup('bytes').type
    str_type = builtin_scope.lookup('str').type
    unicode_type = builtin_scope.lookup('unicode').type
    basestring_type = builtin_scope.lookup('basestring').type
    bytearray_type = builtin_scope.lookup('bytearray').type
    float_type = builtin_scope.lookup('float').type
    bool_type = builtin_scope.lookup('bool').type
    complex_type = builtin_scope.lookup('complex').type
    # cypclass-specific builtins (refcounting, locking, type checks).
    inject_cypclass_refcount_macros()
    inject_cypclass_lock_macros()
    inject_cypclass_typecheck_functions()
# Module initialisation: declare the cypclass support types in the builtin
# scope, then populate the regular Python builtins.
inject_acthon_interfaces(builtin_scope)
inject_cy_object(builtin_scope)
init_builtins()
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/CmdLine.py 0000664 0000000 0000000 00000026131 13766135517 0030161 0 ustar 00root root 0000000 0000000 #
# Cython - Command Line Parsing
#
from __future__ import absolute_import
import os
from argparse import ArgumentParser, Action, SUPPRESS
from . import Options
class ParseDirectivesAction(Action):
    # argparse action for -X/--directive: parses "NAME=VALUE,..." and merges
    # the result into the directives collected so far.
    def __call__(self, parser, namespace, values, option_string=None):
        # Start from the previously parsed directives (or the defaults) so
        # repeated -X options accumulate instead of replacing each other.
        old_directives = dict(getattr(namespace, self.dest,
                              Options.get_directive_defaults()))
        directives = Options.parse_directive_list(
            values, relaxed_bool=True, current_settings=old_directives)
        setattr(namespace, self.dest, directives)
class ParseOptionsAction(Action):
    """argparse action parsing comma-separated NAME[=VALUE] option lists.

    A name without '=' defaults to True; otherwise the textual value is
    read as a boolean, where 'false', 'f', '0' and 'no' (any case) mean
    False and everything else means True.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        collected = dict(getattr(namespace, self.dest, {}))
        for item in values.split(','):
            name, eq, text = item.partition('=')
            if eq:
                collected[name] = text.lower() not in ('false', 'f', '0', 'no')
            else:
                collected[name] = True
        setattr(namespace, self.dest, collected)
class ParseCompileTimeEnvAction(Action):
    # argparse action for -E/--compile-time-env: parses "NAME=VALUE,..."
    # and merges it into the compile-time environment collected so far.
    def __call__(self, parser, namespace, values, option_string=None):
        old_env = dict(getattr(namespace, self.dest, {}))
        new_env = Options.parse_compile_time_env(values, current_settings=old_env)
        setattr(namespace, self.dest, new_env)
class ActivateAllWarningsAction(Action):
    # argparse action for -Wextra: turns on all extra-warning directives
    # on top of any directives already collected.
    def __call__(self, parser, namespace, values, option_string=None):
        directives = getattr(namespace, 'compiler_directives', {})
        directives.update(Options.extra_warnings)
        namespace.compiler_directives = directives
class SetLenientAction(Action):
    """argparse action for --lenient: demote some compile-time errors to
    runtime errors to improve Python compatibility."""

    def __call__(self, parser, namespace, values, option_string=None):
        for flag in ('error_on_unknown_names', 'error_on_uninitialized'):
            setattr(namespace, flag, False)
class SetGDBDebugAction(Action):
    """argparse action for --gdb: enable cygdb debug output, written to
    the current working directory."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.output_dir = os.curdir
        namespace.gdb_debug = True
class SetGDBDebugOutputAction(Action):
    """argparse action for --gdb-outdir: enable cygdb debug output and
    write it into the given directory."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.output_dir = values
        namespace.gdb_debug = True
class SetAnnotateCoverageAction(Action):
    """argparse action for --annotate-coverage: switch annotation on and
    remember the coverage XML file to merge in."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.annotate_coverage_xml = values
        namespace.annotate = True
def create_cython_argparser():
    """Build and return the ArgumentParser for the cython command line."""
    description = "Cython (https://cython.org/) is a compiler for code written in the "\
                  "Cython language. Cython is based on Pyrex by Greg Ewing."
    # argument_default=SUPPRESS: options that are not given do not appear in
    # the namespace at all, so defaults elsewhere are not overwritten.
    parser = ArgumentParser(description=description, argument_default=SUPPRESS)
    parser.add_argument("-V", "--version", dest='show_version', action='store_const', const=1,
                        help='Display version number of cython compiler')
    parser.add_argument("-l", "--create-listing", dest='use_listing_file', action='store_const', const=1,
                        help='Write error messages to a listing file')
    parser.add_argument("-I", "--include-dir", dest='include_path', action='append',
                        help='Search for include files in named directory '
                             '(multiple include directories are allowed).')
    parser.add_argument("-o", "--output-file", dest='output_file', action='store', type=str,
                        help='Specify name of generated C file')
    parser.add_argument("-t", "--timestamps", dest='timestamps', action='store_const', const=1,
                        help='Only compile newer source files')
    # -f shares 'timestamps' with -t and overrides it with 0.
    parser.add_argument("-f", "--force", dest='timestamps', action='store_const', const=0,
                        help='Compile all source files (overrides implied -t)')
    parser.add_argument("-v", "--verbose", dest='verbose', action='count',
                        help='Be verbose, print file names on multiple compilation')
    parser.add_argument("-p", "--embed-positions", dest='embed_pos_in_docstring', action='store_const', const=1,
                        help='If specified, the positions in Cython files of each '
                             'function definition is embedded in its docstring.')
    parser.add_argument("--cleanup", dest='generate_cleanup_code', action='store', type=int,
                        help='Release interned objects on python exit, for memory debugging. '
                             'Level indicates aggressiveness, default 0 releases nothing.')
    parser.add_argument("-w", "--working", dest='working_path', action='store', type=str,
                        help='Sets the working directory for Cython (the directory modules are searched from)')
    parser.add_argument("--gdb", action=SetGDBDebugAction, nargs=0,
                        help='Output debug information for cygdb')
    parser.add_argument("--gdb-outdir", action=SetGDBDebugOutputAction, type=str,
                        help='Specify gdb debug information output directory. Implies --gdb.')
    parser.add_argument("-D", "--no-docstrings", dest='docstrings', action='store_false',
                        help='Strip docstrings from the compiled module.')
    parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',
                        help='Produce a colorized HTML version of the source.')
    parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',
                        help='Produce a colorized HTML version of the source '
                             'which includes entire generated C/C++-code.')
    parser.add_argument("--annotate-coverage", dest='annotate_coverage_xml', action=SetAnnotateCoverageAction, type=str,
                        help='Annotate and include coverage information from cov.xml.')
    parser.add_argument("--line-directives", dest='emit_linenums', action='store_true',
                        help='Produce #line directives pointing to the .pyx source')
    parser.add_argument("-+", "--cplus", dest='cplus', action='store_const', const=1,
                        help='Output a C++ rather than C file.')
    # '--embed' itself is special-cased in parse_command_line_raw(); this
    # entry mainly provides the help text and the default constant.
    parser.add_argument('--embed', action='store_const', const='main',
                        help='Generate a main() function that embeds the Python interpreter. '
                             'Pass --embed= for a name other than main().')
    parser.add_argument('-2', dest='language_level', action='store_const', const=2,
                        help='Compile based on Python-2 syntax and code semantics.')
    parser.add_argument('-3', dest='language_level', action='store_const', const=3,
                        help='Compile based on Python-3 syntax and code semantics.')
    parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',
                        help='Compile based on Python-3 syntax and code semantics without '
                             'assuming unicode by default for string literals under Python 2.')
    parser.add_argument("--lenient", action=SetLenientAction, nargs=0,
                        help='Change some compile time errors to runtime errors to '
                             'improve Python compatibility')
    parser.add_argument("--capi-reexport-cincludes", dest='capi_reexport_cincludes', action='store_true',
                        help='Add cincluded headers to any auto-generated header files.')
    parser.add_argument("--fast-fail", dest='fast_fail', action='store_true',
                        help='Abort the compilation on the first error')
    parser.add_argument("-Werror", "--warning-errors", dest='warning_errors', action='store_true',
                        help='Make all warnings into errors')
    parser.add_argument("-Wextra", "--warning-extra", action=ActivateAllWarningsAction, nargs=0,
                        help='Enable extra warnings')
    parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',
                        dest='compiler_directives', type=str,
                        action=ParseDirectivesAction,
                        help='Overrides a compiler directive')
    parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',
                        dest='compile_time_env', type=str,
                        action=ParseCompileTimeEnvAction,
                        help='Provides compile time env like DEF would do.')
    parser.add_argument('sources', nargs='*', default=[])
    # TODO: add help
    # Hidden/undocumented options (help=SUPPRESS keeps them out of --help).
    parser.add_argument("-z", "--pre-import", dest='pre_import', action='store', type=str, help=SUPPRESS)
    parser.add_argument("--convert-range", dest='convert_range', action='store_true', help=SUPPRESS)
    parser.add_argument("--no-c-in-traceback", dest='c_line_in_traceback', action='store_false', help=SUPPRESS)
    parser.add_argument("--cimport-from-pyx", dest='cimport_from_pyx', action='store_true', help=SUPPRESS)
    parser.add_argument("--old-style-globals", dest='old_style_globals', action='store_true', help=SUPPRESS)
    # debug stuff:
    # Auto-generate one hidden "--debug-..." flag per DebugFlags attribute.
    from . import DebugFlags
    for name in vars(DebugFlags):
        if name.startswith("debug"):
            option_name = name.replace('_', '-')
            parser.add_argument("--" + option_name, action='store_true', help=SUPPRESS)
    return parser
def parse_command_line_raw(parser, args):
    """Run *parser* over *args* and return ``(arguments, sources)``.

    '--embed' and '--embed=NAME' need special treatment because argparse
    does not parse them correctly; they are stripped from the argument
    list first and applied to the parsed result afterwards. Unknown
    non-option tokens are treated as additional source files.
    """
    def is_embed_option(arg):
        return arg == '--embed' or arg.startswith('--embed=')

    embed_args = [a for a in args if is_embed_option(a)]
    plain_args = [a for a in args if not is_embed_option(a)]
    arguments, unknown = parser.parse_known_args(plain_args)
    sources = arguments.sources
    del arguments.sources
    # Whatever argparse could not place is either an input file or a
    # genuinely unknown option.
    for extra in unknown:
        if extra.startswith('-'):
            parser.error("unknown option " + extra)
        else:
            sources.append(extra)
    # Apply the embed options that were filtered out above.
    for embed_arg in embed_args:
        if embed_arg == '--embed':
            embed_name = 'main'  # default value
        else:
            embed_name = embed_arg[len('--embed='):]
        setattr(arguments, 'embed', embed_name)
    return arguments, sources
def parse_command_line(args):
    """Parse the cython command line and return (CompilationOptions, sources)."""
    parser = create_cython_argparser()
    arguments, sources = parse_command_line_raw(parser, args)
    options = Options.CompilationOptions(Options.default_options)
    for name, value in vars(arguments).items():
        if name.startswith('debug'):
            # debug flags are stored on the DebugFlags module itself
            from . import DebugFlags
            if name in dir(DebugFlags):
                setattr(DebugFlags, name, value)
            else:
                parser.error("Unknown debug flag: %s\n" % name)
        elif hasattr(Options, name):
            # module-level Options settings take precedence over
            # per-compilation options
            setattr(Options, name, value)
        else:
            setattr(options, name, value)
    # Sanity checks on the combination of options and sources.
    if options.use_listing_file and len(sources) > 1:
        parser.error("cython: Only one source file allowed when using -o\n")
    if len(sources) == 0 and not options.show_version:
        parser.error("cython: Need at least one source file\n")
    if Options.embed and len(sources) > 1:
        parser.error("cython: Only one source file allowed when using -embed\n")
    return options, sources
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Code.pxd 0000664 0000000 0000000 00000006371 13766135517 0027667 0 ustar 00root root 0000000 0000000
from __future__ import absolute_import
cimport cython
from ..StringIOTree cimport StringIOTree
cdef class UtilityCodeBase(object):
    # Base class for utility code loaded from the Cython/Utility files.
    cpdef format_code(self, code_string, replace_empty_lines=*)
cdef class UtilityCode(UtilityCodeBase):
    # One utility-code snippet: prototype, implementation, optional
    # init/cleanup sections and the other snippets it requires.
    cdef public object name
    cdef public object proto
    cdef public object impl
    cdef public object init
    cdef public object cleanup
    cdef public object proto_block
    cdef public object requires
    cdef public dict _cache
    cdef public list specialize_list
    cdef public object file
    cpdef none_or_sub(self, s, context)
cdef class FunctionState:
    # Per-function code-generation state: labels, temps and error handling.
    cdef public set names_taken
    cdef public object owner
    cdef public object scope
    # label management
    cdef public object error_label
    cdef public size_t label_counter
    cdef public set labels_used
    cdef public object return_label
    cdef public object continue_label
    cdef public object break_label
    cdef public list yield_labels
    cdef public object return_from_error_cleanup_label # not used in __init__ ?
    # exception handling state
    cdef public object exc_vars
    cdef public object current_except
    cdef public bint in_try_finally
    cdef public bint can_trace
    cdef public bint gil_owned
    # temporary variable management
    cdef public list temps_allocated
    cdef public dict temps_free
    cdef public dict temps_used_type
    cdef public size_t temp_counter
    cdef public list collect_temps_stack
    cdef public object closure_temps
    cdef public bint should_declare_error_indicator
    cdef public bint uses_error_indicator
    @cython.locals(n=size_t)
    cpdef new_label(self, name=*)
    cpdef tuple get_loop_labels(self)
    cpdef set_loop_labels(self, labels)
    cpdef tuple get_all_labels(self)
    cpdef set_all_labels(self, labels)
    cpdef start_collecting_temps(self)
    cpdef stop_collecting_temps(self)
    cpdef list temps_in_use(self)
cdef class IntConst:
    # A cached integer constant in the generated C code.
    cdef public object cname
    cdef public object value
    cdef public bint is_long
cdef class PyObjectConst:
    # A cached Python object constant in the generated C code.
    cdef public object cname
    cdef public object type
cdef class StringConst:
    # A cached C string constant, plus its Python string representations.
    cdef public object cname
    cdef public object text
    cdef public object escaped_value
    cdef public dict py_strings
    cdef public list py_versions
    @cython.locals(intern=bint, is_str=bint, is_unicode=bint)
    cpdef get_py_string_const(self, encoding, identifier=*, is_str=*, py3str_cstring=*)
## cdef class PyStringConst:
## cdef public object cname
## cdef public object encoding
## cdef public bint is_str
## cdef public bint is_unicode
## cdef public bint intern
#class GlobalState(object):
#def funccontext_property(name):
cdef class CCodeWriter(object):
    # Writer for the generated C code; tracks indentation and positions.
    cdef readonly StringIOTree buffer
    cdef readonly list pyclass_stack
    cdef readonly object globalstate
    cdef readonly object funcstate
    cdef object code_config
    # source-position tracking for #line directives / annotations
    cdef object last_pos
    cdef object last_marked_pos
    cdef Py_ssize_t level
    cdef public Py_ssize_t call_level # debug-only, see Nodes.py
    cdef bint bol
    cpdef write(self, s)
    cpdef put(self, code)
    cpdef put_safe(self, code)
    cpdef putln(self, code=*, bint safe=*)
    @cython.final
    cdef increase_indent(self)
    @cython.final
    cdef decrease_indent(self)
cdef class PyrexCodeWriter:
    # Simple indentation-tracking writer for generated Pyrex/Cython code.
    cdef public object f
    cdef public Py_ssize_t level
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Code.py 0000664 0000000 0000000 00000300553 13766135517 0027523 0 ustar 00root root 0000000 0000000 # cython: language_level=3str
# cython: auto_pickle=False
#
# Code output module
#
from __future__ import absolute_import
import cython
cython.declare(os=object, re=object, operator=object, textwrap=object,
Template=object, Naming=object, Options=object, StringEncoding=object,
Utils=object, SourceDescriptor=object, StringIOTree=object,
DebugFlags=object, basestring=object, defaultdict=object,
closing=object, partial=object)
import hashlib
import operator
import os
import re
import shutil
import textwrap
from string import Template
from functools import partial
from contextlib import closing
from collections import defaultdict
from . import Naming
from . import Options
from . import DebugFlags
from . import StringEncoding
from . import Version
from .. import Utils
from .Scanning import SourceDescriptor
from ..StringIOTree import StringIOTree
try:
from __builtin__ import basestring
except ImportError:
from builtins import str as basestring
non_portable_builtins_map = {
    # builtins that have different names in different Python versions
    'bytes'         : ('PY_MAJOR_VERSION < 3',  'str'),
    'unicode'       : ('PY_MAJOR_VERSION >= 3', 'str'),
    'basestring'    : ('PY_MAJOR_VERSION >= 3', 'str'),
    'xrange'        : ('PY_MAJOR_VERSION >= 3', 'range'),
    'raw_input'     : ('PY_MAJOR_VERSION >= 3', 'input'),
}

ctypedef_builtins_map = {
    # types of builtins in "ctypedef class" statements which we don't
    # import either because the names conflict with C types or because
    # the type simply is not exposed.
    'py_int'             : '&PyInt_Type',
    'py_long'            : '&PyLong_Type',
    'py_float'           : '&PyFloat_Type',
    'wrapper_descriptor' : '&PyWrapperDescr_Type',
}

basicsize_builtins_map = {
    # builtins whose type has a different tp_basicsize than sizeof(...)
    'PyTypeObject': 'PyHeapTypeObject',
}

uncachable_builtins = [
    # Global/builtin names that cannot be cached because they may or may not
    # be available at import time, for various reasons:
    ## - Py3.7+
    'breakpoint',  # might deserve an implementation in Cython
    ## - Py3.4+
    '__loader__',
    '__spec__',
    ## - Py3+
    'BlockingIOError',
    'BrokenPipeError',
    'ChildProcessError',
    'ConnectionAbortedError',
    'ConnectionError',
    'ConnectionRefusedError',
    'ConnectionResetError',
    'FileExistsError',
    'FileNotFoundError',
    'InterruptedError',
    'IsADirectoryError',
    'ModuleNotFoundError',
    'NotADirectoryError',
    'PermissionError',
    'ProcessLookupError',
    'RecursionError',
    'ResourceWarning',
    #'StopAsyncIteration',  # backported
    'TimeoutError',
    '__build_class__',
    'ascii',  # might deserve an implementation in Cython
    #'exec',  # implemented in Cython
    ## - platform specific
    'WindowsError',
    ## - others
    '_',  # e.g. used by gettext
]

# Python special methods that never get a generic Python-level wrapper.
special_py_methods = set([
    '__cinit__', '__dealloc__', '__richcmp__', '__next__',
    '__await__', '__aiter__', '__anext__',
    '__getreadbuffer__', '__getwritebuffer__', '__getsegcount__',
    '__getcharbuffer__', '__getbuffer__', '__releasebuffer__'
])

# Maps C function modifiers to their C-macro spelling ('inline' is
# platform-dependent); unknown modifiers map to None.
modifier_output_mapper = {
    'inline': 'CYTHON_INLINE'
}.get
class IncludeCode(object):
    """
    An include file and/or verbatim C code to be included in the
    generated sources.
    """
    # attributes:
    #
    #  pieces    {order: unicode}: pieces of C code to be generated.
    #            The included file itself is stored under key 0; verbatim
    #            code is stored under the "order" of the IncludeCode that
    #            first added it, which prevents duplication when the same
    #            include code is reached through multiple cimports.
    #  location  int: where to put this include in the C sources, one
    #            of the constants INITIAL, EARLY, LATE
    #  order     int: sorting order (automatically set by increasing counter)

    # Location constants. If the same include occurs with different
    # locations, the earliest one takes precedence.
    INITIAL = 0
    EARLY = 1
    LATE = 2

    counter = 1  # class-level source of the per-instance "order" value

    def __init__(self, include=None, verbatim=None, late=True, initial=False):
        self.order = self.counter
        type(self).counter += 1
        self.pieces = {}

        if include:
            if include.startswith('<') and include.endswith('>'):
                self.pieces[0] = u'#include {0}'.format(include)
                late = False  # system include is never late
            else:
                self.pieces[0] = u'#include "{0}"'.format(include)

        if verbatim:
            self.pieces[self.order] = verbatim

        if initial:
            self.location = self.INITIAL
        else:
            self.location = self.LATE if late else self.EARLY

    def dict_update(self, d, key):
        """
        Insert `self` in dict `d` with key `key`. If that key already
        exists, update the attributes of the existing value with `self`.
        """
        if key not in d:
            d[key] = self
        else:
            existing = d[key]
            existing.location = min(self.location, existing.location)
            existing.pieces.update(self.pieces)

    def sortkey(self):
        return self.order

    def mainpiece(self):
        """
        Return the main piece of C code, corresponding to the include
        file. If there was no include file, return None.
        """
        return self.pieces.get(0)

    def write(self, code):
        # Emit all stored pieces, sorted by their order keys.
        for piece_key in sorted(self.pieces):
            code.putln(self.pieces[piece_key])
def get_utility_dir():
    """Return the path of Cython's "Utility" code directory.

    Computed on demand rather than stored in module globals:
    http://trac.cython.org/cython_trac/ticket/475
    """
    this_file = os.path.abspath(__file__)
    cython_dir = os.path.dirname(os.path.dirname(this_file))
    return os.path.join(cython_dir, "Utility")
read_utilities_hook = None
"""
Override the hook for reading a utilities file that contains code fragments used
by the codegen.
The hook functions takes the path of the utilities file, and returns a list
of strings, one per line.
The default behavior is to open a file relative to get_utility_dir().
"""
def read_utilities_from_utility_dir(path):
    """
    Read all lines of the file at the provided path from a path relative
    to get_utility_dir().
    """
    full_path = os.path.join(get_utility_dir(), path)
    source_file = Utils.open_source_file(full_path, encoding='UTF-8')
    # closing() guarantees the file object is released even on error.
    with closing(source_file) as f:
        return f.readlines()
# by default, read utilities from the utility directory.
read_utilities_hook = read_utilities_from_utility_dir
class UtilityCodeBase(object):
    """
    Support for loading utility code from a file.

    Code sections in the file can be specified as follows:

        ##### MyUtility.proto #####

        [proto declarations]

        ##### MyUtility.init #####

        [code run at module initialization]

        ##### MyUtility #####
        #@requires: MyOtherUtility
        #@substitute: naming

        [definitions]

        ##### MyUtility #####
        #@substitute: tempita

        [requires tempita substitution
         - context can't be specified here though so only
           tempita utility that requires no external context
           will benefit from this tag
         - only necessary when @required from non-tempita code]

    for prototypes and implementation respectively.  For non-python or
    -cython files backslashes should be used instead.  5 to 30 comment
    characters may be used on either side.

    If the @cname decorator is not used and this is a CythonUtilityCode,
    one should pass in the 'name' keyword argument to be used for name
    mangling of such entries.
    """

    is_cython_utility = False
    _utility_cache = {}

    @classmethod
    def _add_utility(cls, utility, type, lines, begin_lineno, tags=None):
        """Store a finished code section into `utility` = [proto, impl, {tags}].

        `type` selects the slot ('proto'/'impl'); any other section type is
        stored as a tag.  Line numbers are preserved by prefixing newlines.
        """
        if utility is None:
            return

        code = '\n'.join(lines)
        if tags and 'substitute' in tags and 'naming' in tags['substitute']:
            try:
                code = Template(code).substitute(vars(Naming))
            except (KeyError, ValueError) as e:
                raise RuntimeError("Error parsing templated utility code of type '%s' at line %d: %s" % (
                    type, begin_lineno, e))

        # remember correct line numbers at least until after templating
        code = '\n' * begin_lineno + code

        if type == 'proto':
            utility[0] = code
        elif type == 'impl':
            utility[1] = code
        else:
            # 'init', 'cleanup', ... sections are stored as tags
            all_tags = utility[2]
            all_tags[type] = code

        if tags:
            all_tags = utility[2]
            for name, values in tags.items():
                all_tags.setdefault(name, set()).update(values)

    @classmethod
    def load_utilities_from_file(cls, path):
        """Parse the utility file at `path` into {name: [proto, impl, tags]}.

        Results are cached per path in cls._utility_cache.
        """
        utilities = cls._utility_cache.get(path)
        if utilities:
            return utilities

        _, ext = os.path.splitext(path)
        if ext in ('.pyx', '.py', '.pxd', '.pxi'):
            comment = '#'
            strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '')
            rstrip = StringEncoding._unicode.rstrip
        else:
            comment = '/'
            strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '')
            rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1')
        # BUG FIX: the named groups 'name', 'tag' and 'value' had been lost
        # from this pattern, making re.compile() fail and breaking the
        # m.group('name'/'tag'/'value') lookups below.
        match_special = re.compile(
            (r'^%(C)s{5,30}\s*(?P<name>(?:\w|\.)+)\s*%(C)s{5,30}|'
             r'^%(C)s+@(?P<tag>\w+)\s*:\s*(?P<value>(?:\w|[.:])+)') %
            {'C': comment}).match
        match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match

        all_lines = read_utilities_hook(path)

        utilities = defaultdict(lambda: [None, None, {}])
        lines = []
        tags = defaultdict(set)
        utility = type = None
        begin_lineno = 0

        for lineno, line in enumerate(all_lines):
            m = match_special(line)
            if m:
                if m.group('name'):
                    # start of a new section: flush the previous one
                    cls._add_utility(utility, type, lines, begin_lineno, tags)

                    begin_lineno = lineno + 1
                    del lines[:]
                    tags.clear()

                    name = m.group('name')
                    mtype = match_type(name)
                    if mtype:
                        name, type = mtype.groups()
                    else:
                        type = 'impl'
                    utility = utilities[name]
                else:
                    # '#@tag: value' annotation line
                    tags[m.group('tag')].add(m.group('value'))
                    lines.append('')  # keep line number correct
            else:
                lines.append(rstrip(strip_comments(line)))

        if utility is None:
            raise ValueError("Empty utility code file")

        # Don't forget to add the last utility code
        cls._add_utility(utility, type, lines, begin_lineno, tags)

        utilities = dict(utilities)  # un-defaultdict-ify
        cls._utility_cache[path] = utilities
        return utilities

    @classmethod
    def load(cls, util_code_name, from_file, **kwargs):
        """
        Load utility code from a file specified by from_file (relative to
        Cython/Utility) and name util_code_name.
        """
        if '::' in util_code_name:
            from_file, util_code_name = util_code_name.rsplit('::', 1)
        assert from_file
        utilities = cls.load_utilities_from_file(from_file)
        proto, impl, tags = utilities[util_code_name]

        if tags:
            if "substitute" in tags and "tempita" in tags["substitute"]:
                if not issubclass(cls, TempitaUtilityCode):
                    return TempitaUtilityCode.load(util_code_name, from_file, **kwargs)

            orig_kwargs = kwargs.copy()
            for name, values in tags.items():
                if name in kwargs:
                    continue
                # only pass lists when we have to: most arguments expect one value or None
                if name == 'requires':
                    if orig_kwargs:
                        values = [cls.load(dep, from_file, **orig_kwargs)
                                  for dep in sorted(values)]
                    else:
                        # dependencies are rarely unique, so use load_cached() when we can
                        values = [cls.load_cached(dep, from_file)
                                  for dep in sorted(values)]
                elif name == 'substitute':
                    # don't want to pass "naming" or "tempita" to the constructor
                    # since these will have been handled
                    values = values - set(['naming', 'tempita'])
                    if not values:
                        continue
                elif not values:
                    values = None
                elif len(values) == 1:
                    values = list(values)[0]
                kwargs[name] = values

        if proto is not None:
            kwargs['proto'] = proto
        if impl is not None:
            kwargs['impl'] = impl

        if 'name' not in kwargs:
            kwargs['name'] = util_code_name

        if 'file' not in kwargs and from_file:
            kwargs['file'] = from_file
        return cls(**kwargs)

    @classmethod
    def load_cached(cls, utility_code_name, from_file, __cache={}):
        """
        Calls .load(), but using a per-type cache based on utility name and file name.
        """
        key = (utility_code_name, from_file, cls)
        try:
            return __cache[key]
        except KeyError:
            pass
        code = __cache[key] = cls.load(utility_code_name, from_file)
        return code

    @classmethod
    def load_as_string(cls, util_code_name, from_file, **kwargs):
        """
        Load a utility code as a string. Returns (proto, implementation)
        """
        util = cls.load(util_code_name, from_file, **kwargs)
        proto, impl = util.proto, util.impl
        return util.format_code(proto), util.format_code(impl)

    def format_code(self, code_string, replace_empty_lines=re.compile(r'\n\n+').sub):
        """
        Format a code section for output: collapse empty-line runs and
        ensure a trailing blank line.
        """
        if code_string:
            code_string = replace_empty_lines('\n', code_string.strip()) + '\n\n'
        return code_string

    def __str__(self):
        return "<%s(%s)>" % (type(self).__name__, self.name)

    def get_tree(self, **kwargs):
        # only implemented by CythonUtilityCode subclasses
        pass

    def __deepcopy__(self, memodict=None):
        # No need to deep-copy utility code since it's essentially immutable.
        return self
class UtilityCode(UtilityCodeBase):
    """
    Stores utility code to add during code generation.

    See GlobalState.put_utility_code.

    hashes/equals by instance

    proto           C prototypes
    impl            implementation code
    init            code to call on module initialization
    requires        utility code dependencies
    proto_block     the place in the resulting file where the prototype should
                    end up
    name            name of the utility code (or None)
    file            filename of the utility code file this utility was loaded
                    from (or None)
    """

    def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
                 proto_block='utility_code_proto', name=None, file=None):
        # proto_block: Which code block to dump prototype in. See GlobalState.
        self.proto = proto
        self.impl = impl
        self.init = init
        self.cleanup = cleanup
        self.requires = requires
        self._cache = {}            # specialization cache, see specialize()
        self.specialize_list = []   # all specializations created from this code
        self.proto_block = proto_block
        self.name = name
        self.file = file

    def __hash__(self):
        # hash by code content so identical snippets deduplicate in sets/dicts
        return hash((self.proto, self.impl))

    def __eq__(self, other):
        if self is other:
            return True
        self_type, other_type = type(self), type(other)
        # only compare instances of related types (one a subclass of the other)
        if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)):
            return False

        self_proto = getattr(self, 'proto', None)
        other_proto = getattr(other, 'proto', None)
        return (self_proto, self.impl) == (other_proto, other.impl)

    def none_or_sub(self, s, context):
        """
        Format a string in this utility code with context. If None, do nothing.
        """
        if s is None:
            return None
        return s % context

    def specialize(self, pyrex_type=None, **data):
        # Return a copy of this utility code with `data` %-substituted into
        # all code sections; results are cached per substitution-data set.
        # Dicts aren't hashable...
        if pyrex_type is not None:
            data['type'] = pyrex_type.empty_declaration_code()
            data['type_name'] = pyrex_type.specialization_name()
        key = tuple(sorted(data.items()))
        try:
            return self._cache[key]
        except KeyError:
            if self.requires is None:
                requires = None
            else:
                # NOTE(review): this passes the data dict as the positional
                # 'pyrex_type' argument of specialize() - presumably the
                # pyrex_type branch is never reached for dependencies; verify.
                requires = [r.specialize(data) for r in self.requires]

            s = self._cache[key] = UtilityCode(
                self.none_or_sub(self.proto, data),
                self.none_or_sub(self.impl, data),
                self.none_or_sub(self.init, data),
                self.none_or_sub(self.cleanup, data),
                requires,
                self.proto_block)

            self.specialize_list.append(s)
            return s

    def inject_string_constants(self, impl, output):
        """Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
        """
        if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
            return False, impl

        replacements = {}
        def externalise(matchobj):
            # map each (IDENT|UNICODE, text) pair to one interned constant cname
            key = matchobj.groups()
            try:
                cname = replacements[key]
            except KeyError:
                str_type, name = key
                cname = replacements[key] = output.get_py_string_const(
                    StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
            return cname

        impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
        assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
        return True, impl

    def inject_unbound_methods(self, impl, output):
        """Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname.
        """
        if 'CALL_UNBOUND_METHOD(' not in impl:
            return False, impl

        def externalise(matchobj):
            type_cname, method_name, obj_cname, args = matchobj.groups()
            args = [arg.strip() for arg in args[1:].split(',')] if args else []
            assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
            return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)

        impl = re.sub(
            r'CALL_UNBOUND_METHOD\('
            r'([a-zA-Z_]+),'      # type cname
            r'\s*"([^"]+)",'      # method name
            r'\s*([^),]+)'        # object cname
            r'((?:,\s*[^),]+)*)'  # args*
            r'\)', externalise, impl)
        assert 'CALL_UNBOUND_METHOD(' not in impl
        return True, impl

    def wrap_c_strings(self, impl):
        """Replace CSTRING('''xyz''') by a C compatible string
        """
        if 'CSTRING(' not in impl:
            return impl

        def split_string(matchobj):
            # emit one C string literal per line, keeping explicit
            # line continuations ('\') intact
            content = matchobj.group(1).replace('"', '\042')
            return ''.join(
                '"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1]
                for line in content.splitlines())

        impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl)
        assert 'CSTRING(' not in impl
        return impl

    def put_code(self, output):
        # Emit this utility's sections into the appropriate output parts,
        # inserting all dependencies first.
        if self.requires:
            for dependency in self.requires:
                output.use_utility_code(dependency)
        if self.proto:
            writer = output[self.proto_block]
            writer.putln("/* %s.proto */" % self.name)
            writer.put_or_include(
                self.format_code(self.proto), '%s_proto' % self.name)
        if self.impl:
            impl = self.format_code(self.wrap_c_strings(self.impl))
            is_specialised1, impl = self.inject_string_constants(impl, output)
            is_specialised2, impl = self.inject_unbound_methods(impl, output)
            writer = output['utility_code_def']
            writer.putln("/* %s */" % self.name)
            if not (is_specialised1 or is_specialised2):
                # no module specific adaptations => can be reused
                writer.put_or_include(impl, '%s_impl' % self.name)
            else:
                writer.put(impl)
        if self.init:
            writer = output['init_globals']
            writer.putln("/* %s.init */" % self.name)
            if isinstance(self.init, basestring):
                writer.put(self.format_code(self.init))
            else:
                # callable init hooks generate their own code
                self.init(writer, output.module_pos)
            writer.putln(writer.error_goto_if_PyErr(output.module_pos))
            writer.putln()
        if self.cleanup and Options.generate_cleanup_code:
            writer = output['cleanup_globals']
            writer.putln("/* %s.cleanup */" % self.name)
            if isinstance(self.cleanup, basestring):
                writer.put_or_include(
                    self.format_code(self.cleanup),
                    '%s_cleanup' % self.name)
            else:
                self.cleanup(writer, output.module_pos)
def sub_tempita(s, context, file=None, name=None):
    "Run tempita on string s with given context."
    if not s:
        return None

    # record the origin for error reporting inside the template engine
    origin = "%s:%s" % (file, name) if file else name
    if origin:
        context['__name'] = origin

    from ..Tempita import sub
    return sub(s, **context)
class TempitaUtilityCode(UtilityCode):
    # UtilityCode whose proto/impl/init sections are pre-processed with
    # Tempita before being stored.

    def __init__(self, name=None, proto=None, impl=None, init=None, file=None, context=None, **kwargs):
        if context is None:
            context = {}
        # run tempita substitution on every code section up front
        proto = sub_tempita(proto, context, file, name)
        impl = sub_tempita(impl, context, file, name)
        init = sub_tempita(init, context, file, name)
        super(TempitaUtilityCode, self).__init__(
            proto, impl, init=init, name=name, file=file, **kwargs)

    @classmethod
    def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}):
        # NOTE: the mutable default '__cache' is intentional - it is the
        # shared per-process cache keyed by (cls, file, name, context).
        context_key = tuple(sorted(context.items())) if context else None
        assert hash(context_key) is not None  # raise TypeError if not hashable
        key = (cls, from_file, utility_code_name, context_key)
        try:
            return __cache[key]
        except KeyError:
            pass
        code = __cache[key] = cls.load(utility_code_name, from_file, context=context)
        return code

    def none_or_sub(self, s, context):
        """
        Format a string in this utility code with context. If None, do nothing.
        """
        if s is None:
            return None
        return sub_tempita(s, context, self.file, self.name)
class LazyUtilityCode(UtilityCodeBase):
    """
    Utility code that calls a callback with the root code writer when
    available. Useful when you only have 'env' but not 'code'.
    """
    # BUG FIX: the placeholder name was lost ('' instead of '<lazy>',
    # apparently stripped together with other angle-bracketed text).
    __name__ = '<lazy>'
    requires = None

    def __init__(self, callback):
        # `callback` is invoked lazily with the root code writer and must
        # return the actual utility code to insert.
        self.callback = callback

    def put_code(self, globalstate):
        utility = self.callback(globalstate.rootwriter)
        globalstate.use_utility_code(utility)
class FunctionState(object):
    # Tracks all per-function code-generation state: labels (return/error/
    # loop/yield) and C temporary variables (allocation, reuse, refcounting).
    #
    # return_label     string          function return point label
    # error_label      string          error catch point label
    # continue_label   string          loop continue point label
    # break_label      string          loop break point label
    # return_from_error_cleanup_label string
    # label_counter    integer         counter for naming labels
    # in_try_finally   boolean         inside try of try...finally
    # exc_vars         (string * 3)    exception variables for reraise, or None
    # can_trace        boolean         line tracing is supported in the current context
    # scope            Scope           the scope object of the current function

    # Not used for now, perhaps later

    def __init__(self, owner, names_taken=set(), scope=None):
        # NOTE(review): the mutable default for 'names_taken' is shared
        # across calls; it is only ever read (in allocate_temp), never
        # mutated here, so this is benign.
        self.names_taken = names_taken
        self.owner = owner
        self.scope = scope

        self.error_label = None
        self.label_counter = 0
        self.labels_used = set()
        self.return_label = self.new_label()
        self.new_error_label()
        self.continue_label = None
        self.break_label = None
        self.yield_labels = []

        self.in_try_finally = 0
        self.exc_vars = None
        self.current_except = None
        self.can_trace = False
        self.gil_owned = True

        self.temps_allocated = []  # of (name, type, manage_ref, static)
        self.temps_free = {}  # (type, manage_ref) -> list of free vars with same type/managed status
        self.temps_used_type = {}  # name -> (type, manage_ref)
        self.zombie_temps = set()  # temps that must not be reused after release
        self.temp_counter = 0
        self.closure_temps = None

        # This is used to collect temporaries, useful to find out which temps
        # need to be privatized in parallel sections
        self.collect_temps_stack = []

        # This is used for the error indicator, which needs to be local to the
        # function. It used to be global, which relies on the GIL being held.
        # However, exceptions may need to be propagated through 'nogil'
        # sections, in which case we introduce a race condition.
        self.should_declare_error_indicator = False
        self.uses_error_indicator = False

    # safety checks

    def validate_exit(self):
        # validate that all allocated temps have been freed
        if self.temps_allocated:
            leftovers = self.temps_in_use()
            if leftovers:
                msg = "TEMPGUARD: Temps left over at end of '%s': %s" % (self.scope.name, ', '.join([
                    '%s [%s]' % (name, ctype)
                    for name, ctype, is_pytemp in sorted(leftovers)]),
                )
                #print(msg)
                raise RuntimeError(msg)

    # labels

    def new_label(self, name=None):
        # Create a fresh, unique label, optionally suffixed with `name`.
        n = self.label_counter
        self.label_counter = n + 1
        label = "%s%d" % (Naming.label_prefix, n)
        if name is not None:
            label += '_' + name
        return label

    def new_yield_label(self, expr_type='yield'):
        # Returns (1-based yield number, label) and records it for this function.
        label = self.new_label('resume_from_%s' % expr_type)
        num_and_label = (len(self.yield_labels) + 1, label)
        self.yield_labels.append(num_and_label)
        return num_and_label

    def new_error_label(self):
        # Install a fresh error label; returns the previous one so the
        # caller can restore it afterwards.
        old_err_lbl = self.error_label
        self.error_label = self.new_label('error')
        return old_err_lbl

    def get_loop_labels(self):
        return (
            self.continue_label,
            self.break_label)

    def set_loop_labels(self, labels):
        (self.continue_label,
         self.break_label) = labels

    def new_loop_labels(self):
        # Enter a new loop; returns the enclosing loop's labels for restoring.
        old_labels = self.get_loop_labels()
        self.set_loop_labels(
            (self.new_label("continue"),
             self.new_label("break")))
        return old_labels

    def get_all_labels(self):
        return (
            self.continue_label,
            self.break_label,
            self.return_label,
            self.error_label)

    def set_all_labels(self, labels):
        (self.continue_label,
         self.break_label,
         self.return_label,
         self.error_label) = labels

    def all_new_labels(self):
        # Replace every currently-set label with a fresh one; unset (falsy)
        # labels stay unset.  Returns the old labels for restoring.
        old_labels = self.get_all_labels()
        new_labels = []
        for old_label, name in zip(old_labels, ['continue', 'break', 'return', 'error']):
            if old_label:
                new_labels.append(self.new_label(name))
            else:
                new_labels.append(old_label)
        self.set_all_labels(new_labels)
        return old_labels

    def use_label(self, lbl):
        # Mark a label as actually jumped to, so that it gets emitted.
        self.labels_used.add(lbl)

    def label_used(self, lbl):
        return lbl in self.labels_used

    # temp handling

    def allocate_temp(self, type, manage_ref, static=False, reusable=True):
        """
        Allocates a temporary (which may create a new one or get a previously
        allocated and released one of the same type). Type is simply registered
        and handed back, but will usually be a PyrexType.

        If type.is_pyobject, manage_ref comes into play. If manage_ref is set to
        True, the temp will be decref-ed on return statements and in exception
        handling clauses. Otherwise the caller has to deal with any reference
        counting of the variable.

        If not type.is_pyobject, then manage_ref will be ignored, but it
        still has to be passed. It is recommended to pass False by convention
        if it is known that type will never be a Python object.

        static=True marks the temporary declaration with "static".
        This is only used when allocating backing store for a module-level
        C array literals.

        if reusable=False, the temp will not be reused after release.

        A C string referring to the variable is returned.
        """
        if type.is_cv_qualified and not type.is_reference:
            type = type.cv_base_type
        elif type.is_reference and not type.is_fake_reference:
            type = type.ref_base_type
        elif type.is_cfunction:
            from . import PyrexTypes
            type = PyrexTypes.c_ptr_type(type)  # A function itself isn't an l-value
        if not type.is_pyobject and not type.is_memoryviewslice and not type.is_cyp_class:
            # Make manage_ref canonical, so that manage_ref will always mean
            # a decref is needed.
            manage_ref = False
        if type.is_checked_result:
            manage_ref = False

        freelist = self.temps_free.get((type, manage_ref))
        if reusable and freelist is not None and freelist[0]:
            # reuse a previously released temp of the same type
            result = freelist[0].pop()
            freelist[1].remove(result)
        else:
            while True:
                self.temp_counter += 1
                result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter)
                if result not in self.names_taken: break
            self.temps_allocated.append((result, type, manage_ref, static))
            if not reusable:
                self.zombie_temps.add(result)
        self.temps_used_type[result] = (type, manage_ref)
        if DebugFlags.debug_temp_code_comments:
            self.owner.putln("/* %s allocated (%s)%s */" % (result, type, "" if reusable else " - zombie"))

        if self.collect_temps_stack:
            self.collect_temps_stack[-1].add((result, type))

        return result

    def release_temp(self, name):
        """
        Releases a temporary so that it can be reused by other code needing
        a temp of the same type.
        """
        type, manage_ref = self.temps_used_type[name]
        freelist = self.temps_free.get((type, manage_ref))
        if freelist is None:
            freelist = ([], set())  # keep order in list and make lookups in set fast
            self.temps_free[(type, manage_ref)] = freelist
        if name in freelist[1]:
            raise RuntimeError("Temp %s freed twice!" % name)
        if name not in self.zombie_temps:
            # zombie temps are tracked as released but never handed out again
            freelist[0].append(name)
        freelist[1].add(name)
        if DebugFlags.debug_temp_code_comments:
            self.owner.putln("/* %s released %s*/" % (
                name, " - zombie" if name in self.zombie_temps else ""))

    def temps_in_use(self):
        """Return a list of (cname,type,manage_ref) tuples of temp names and their type
        that are currently in use.
        """
        used = []
        for name, type, manage_ref, static in self.temps_allocated:
            freelist = self.temps_free.get((type, manage_ref))
            if freelist is None or name not in freelist[1]:
                used.append((name, type, manage_ref and type.is_pyobject))
        return used

    def temps_holding_reference(self):
        """Return a list of (cname,type) tuples of temp names and their type
        that are currently in use. This includes only temps of a
        Python object type which owns its reference.
        """
        return [(name, type)
                for name, type, manage_ref in self.temps_in_use()
                if manage_ref and type.is_pyobject]

    def all_managed_temps(self):
        """Return a list of (cname, type) tuples of refcount-managed Python objects.
        """
        return [(cname, type)
                for cname, type, manage_ref, static in self.temps_allocated
                if manage_ref]

    def all_free_managed_temps(self):
        """Return a list of (cname, type) tuples of refcount-managed Python
        objects that are not currently in use. This is used by
        try-except and try-finally blocks to clean up temps in the
        error case.
        """
        return sorted([  # Enforce deterministic order.
            (cname, type)
            for (type, manage_ref), freelist in self.temps_free.items() if manage_ref
            for cname in freelist[0]
        ])

    def start_collecting_temps(self):
        """
        Useful to find out which temps were used in a code block
        """
        self.collect_temps_stack.append(set())

    def stop_collecting_temps(self):
        return self.collect_temps_stack.pop()

    def init_closure_temps(self, scope):
        self.closure_temps = ClosureTempAllocator(scope)
class NumConst(object):
    """Global info about a Python number constant held by GlobalState.

    cname       string
    value       string
    py_type     string     int, long, float
    value_code  string     evaluation code if different from value
    """

    def __init__(self, cname, value, py_type, value_code=None):
        self.cname = cname
        self.value = value
        self.py_type = py_type
        # fall back to the literal value when no explicit evaluation code is given
        self.value_code = value if not value_code else value_code
class PyObjectConst(object):
    """Global info about a generic constant held by GlobalState.

    cname  string
    type   PyrexType
    """

    def __init__(self, cname, type):
        self.cname = cname
        self.type = type
# Pre-compiled helpers for constant-name mangling, declared for Cython
# compilation of this module itself.
cython.declare(possible_unicode_identifier=object, possible_bytes_identifier=object,
               replace_identifier=object, find_alphanums=object)
possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match
possible_bytes_identifier = re.compile(r"(?![0-9])\w+$".encode('ASCII')).match
replace_identifier = re.compile(r'[^a-zA-Z0-9_]+').sub
find_alphanums = re.compile('([a-zA-Z0-9]+)').findall
class StringConst(object):
    """Global info about a C string constant held by GlobalState.
    """
    # cname       string
    # text        EncodedString or BytesLiteral
    # py_strings  {(identifier, encoding) : PyStringConst}

    def __init__(self, cname, text, byte_string):
        self.cname = cname
        self.text = text
        self.escaped_value = StringEncoding.escape_byte_string(byte_string)
        self.py_strings = None  # created lazily in get_py_string_const()
        self.py_versions = []   # Python major versions that need this constant

    def add_py_version(self, version):
        # A falsy version means "needed in both Python 2 and 3".
        if not version:
            self.py_versions = [2, 3]
        elif version not in self.py_versions:
            self.py_versions.append(version)

    def get_py_string_const(self, encoding, identifier=None,
                            is_str=False, py3str_cstring=None):
        # Return (creating and caching if necessary) the PyStringConst for
        # this C string under the given encoding and str/identifier flags.
        py_strings = self.py_strings
        text = self.text

        is_str = bool(identifier or is_str)
        is_unicode = encoding is None and not is_str

        if encoding is None:
            # unicode string
            encoding_key = None
        else:
            # bytes or str
            encoding = encoding.lower()
            if encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii'):
                # default encodings are not distinguished in the cache key
                encoding = None
                encoding_key = None
            else:
                encoding_key = ''.join(find_alphanums(encoding))

        key = (is_str, is_unicode, encoding_key, py3str_cstring)
        if py_strings is not None:
            try:
                return py_strings[key]
            except KeyError:
                pass
        else:
            self.py_strings = {}

        if identifier:
            intern = True
        elif identifier is None:
            # no explicit choice: intern when the text looks like an identifier
            if isinstance(text, bytes):
                intern = bool(possible_bytes_identifier(text))
            else:
                intern = bool(possible_unicode_identifier(text))
        else:
            intern = False
        if intern:
            prefix = Naming.interned_prefixes['str']
        else:
            prefix = Naming.py_const_prefix

        if encoding_key:
            encoding_prefix = '_%s' % encoding_key
        else:
            encoding_prefix = ''

        pystring_cname = "%s%s%s_%s" % (
            prefix,
            (is_str and 's') or (is_unicode and 'u') or 'b',
            encoding_prefix,
            self.cname[len(Naming.const_prefix):])

        py_string = PyStringConst(
            pystring_cname, encoding, is_unicode, is_str, py3str_cstring, intern)
        self.py_strings[key] = py_string
        return py_string
class PyStringConst(object):
    """Global info about a Python string constant held by GlobalState.

    cname           string
    py3str_cstring  string
    encoding        string
    intern          boolean
    is_unicode      boolean
    is_str          boolean
    """

    def __init__(self, cname, encoding, is_unicode, is_str=False,
                 py3str_cstring=None, intern=False):
        self.cname = cname
        self.encoding = encoding
        self.is_unicode = is_unicode
        self.is_str = is_str
        self.py3str_cstring = py3str_cstring
        self.intern = intern

    def __lt__(self, other):
        # order by C name only, for deterministic output ordering
        return self.cname < other.cname
class GlobalState(object):
    # Holds all module-global code-generation state: the named output code
    # parts, and the registries of interned constants (strings, numbers,
    # generic Python objects).
    #
    # filename_table   {string : int} for finding filename table indexes
    # filename_list    [string]   filenames in filename table order
    # input_file_contents dict    contents (=list of lines) of any file that was used as input
    #                             to create this output C code.  This is
    #                             used to annotate the comments.
    #
    # utility_codes    set        IDs of used utility code (to avoid reinsertion)
    #
    # declared_cnames  {string:Entry} used in a transition phase to merge pxd-declared
    #                             constants etc. into the pyx-declared ones (i.e,
    #                             check if constants are already added).
    #                             In time, hopefully the literals etc. will be
    #                             supplied directly instead.
    #
    # const_cnames_used  dict     global counter for unique constant identifiers
    #
    # parts            {string:CCodeWriter}
    # interned_strings
    # consts
    # interned_nums

    # directives set  Temporary variable used to track
    # the current set of directives in the code generation
    # process.
    directives = {}

    # Ordered list of the named sections the output C file is split into.
    code_layout = [
        'h_code',
        'filename_table',
        'utility_code_proto_before_types',
        'numeric_typedefs',           # Let these detailed individual parts stay!,
        'complex_type_declarations',  # as the proper solution is to make a full DAG...
        'type_declarations',          # More coarse-grained blocks would simply hide
        'utility_code_proto',         # the ugliness, not fix it
        'module_declarations',
        'typeinfo',
        'before_global_var',
        'global_var',
        'string_decls',
        'decls',
        'late_includes',
        'module_state',
        'module_state_clear',
        'module_state_traverse',
        'module_state_defines',  # redefines names used in module_state/_clear/_traverse
        'module_code',  # user code goes here
        'pystring_table',
        'cached_builtins',
        'cached_constants',
        'init_globals',
        'init_module',
        'cleanup_globals',
        'cleanup_module',
        'main_method',
        'utility_code_def',
        'end'
    ]
def __init__(self, writer, module_node, code_config, common_utility_include_dir=None):
    # `writer` is the root CCodeWriter from which all output parts are split.
    self.filename_table = {}
    self.filename_list = []
    self.input_file_contents = {}
    self.utility_codes = set()
    self.declared_cnames = {}
    self.in_utility_code_generation = False
    self.code_config = code_config
    self.common_utility_include_dir = common_utility_include_dir
    self.parts = {}
    self.module_node = module_node  # because some utility code generation needs it
                                    # (generating backwards-compatible Get/ReleaseBuffer

    # constant registries, filled during code generation
    self.const_cnames_used = {}
    self.string_const_index = {}
    self.dedup_const_index = {}
    self.pyunicode_ptr_const_index = {}
    self.num_const_index = {}
    self.py_constants = []
    self.cached_cmethods = {}
    self.initialised_constants = set()

    writer.set_global_state(self)
    self.rootwriter = writer
def initialize_main_c_code(self):
    # Split the root writer into the ordered insertion points listed in
    # code_layout and open the generated module-level helper functions
    # (__Pyx_InitCachedBuiltins / __Pyx_InitCachedConstants / __Pyx_InitGlobals
    # / __Pyx_CleanupGlobals); they are closed later in close_global_decls().
    rootwriter = self.rootwriter
    for i, part in enumerate(self.code_layout):
        w = self.parts[part] = rootwriter.insertion_point()
        if i > 0:
            w.putln("/* #### Code section: %s ### */" % part)

    if not Options.cache_builtins:
        del self.parts['cached_builtins']
    else:
        w = self.parts['cached_builtins']
        w.enter_cfunc_scope()
        w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {")

    w = self.parts['cached_constants']
    w.enter_cfunc_scope()
    w.putln("")
    w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {")
    w.put_declare_refcount_context()
    w.put_setup_refcount_context(StringEncoding.EncodedString("__Pyx_InitCachedConstants"))

    w = self.parts['init_globals']
    w.enter_cfunc_scope()
    w.putln("")
    w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {")

    if not Options.generate_cleanup_code:
        del self.parts['cleanup_globals']
    else:
        w = self.parts['cleanup_globals']
        w.enter_cfunc_scope()
        w.putln("")
        w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {")

    code = self.parts['utility_code_proto']
    code.putln("")
    code.putln("/* --- Runtime support code (head) --- */")

    code = self.parts['utility_code_def']
    if self.code_config.emit_linenums:
        code.write('\n#line 1 "cython_utility"\n')
    code.putln("")
    code.putln("/* --- Runtime support code --- */")
def finalize_main_c_code(self):
    # Close all generated helper functions, then append the type-conversion
    # utility code implementation at the very end of the output.
    self.close_global_decls()

    #
    # utility_code_def
    #
    code = self.parts['utility_code_def']
    util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c")
    code.put(util.format_code(util.impl))
    code.putln("")
def __getitem__(self, key):
return self.parts[key]
#
# Global constants, interned objects, etc.
#
def close_global_decls(self):
    # This is called when it is known that no more global declarations will
    # be declared: emit all pending constant declarations and close the
    # generated helper functions opened in initialize_main_c_code().
    self.generate_const_declarations()
    if Options.cache_builtins:
        w = self.parts['cached_builtins']
        w.putln("return 0;")
        if w.label_used(w.error_label):
            w.put_label(w.error_label)
            w.putln("return -1;")
        w.putln("}")
        w.exit_cfunc_scope()

    w = self.parts['cached_constants']
    w.put_finish_refcount_context()
    w.putln("return 0;")
    if w.label_used(w.error_label):
        w.put_label(w.error_label)
        w.put_finish_refcount_context()
        w.putln("return -1;")
    w.putln("}")
    w.exit_cfunc_scope()

    w = self.parts['init_globals']
    w.putln("return 0;")
    if w.label_used(w.error_label):
        w.put_label(w.error_label)
        w.putln("return -1;")
    w.putln("}")
    w.exit_cfunc_scope()

    if Options.generate_cleanup_code:
        w = self.parts['cleanup_globals']
        w.putln("}")
        w.exit_cfunc_scope()

    if Options.generate_cleanup_code:
        w = self.parts['cleanup_module']
        w.putln("}")
        w.exit_cfunc_scope()
def put_pyobject_decl(self, entry):
self['global_var'].putln("static PyObject *%s;" % entry.cname)
# constant handling at code generation time
def get_cached_constants_writer(self, target=None):
if target is not None:
if target in self.initialised_constants:
# Return None on second/later calls to prevent duplicate creation code.
return None
self.initialised_constants.add(target)
return self.parts['cached_constants']
def get_int_const(self, str_value, longness=False):
py_type = longness and 'long' or 'int'
try:
c = self.num_const_index[(str_value, py_type)]
except KeyError:
c = self.new_num_const(str_value, py_type)
return c
def get_float_const(self, str_value, value_code):
try:
c = self.num_const_index[(str_value, 'float')]
except KeyError:
c = self.new_num_const(str_value, 'float', value_code)
return c
def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
if dedup_key is not None:
const = self.dedup_const_index.get(dedup_key)
if const is not None:
return const
# create a new Python object constant
const = self.new_py_const(type, prefix)
if cleanup_level is not None \
and cleanup_level <= Options.generate_cleanup_code:
cleanup_writer = self.parts['cleanup_globals']
cleanup_writer.putln('Py_CLEAR(%s);' % const.cname)
if dedup_key is not None:
self.dedup_const_index[dedup_key] = const
return const
def get_string_const(self, text, py_version=None):
# return a C string constant, creating a new one if necessary
if text.is_unicode:
byte_string = text.utf8encode()
else:
byte_string = text.byteencode()
try:
c = self.string_const_index[byte_string]
except KeyError:
c = self.new_string_const(text, byte_string)
c.add_py_version(py_version)
return c
def get_pyunicode_ptr_const(self, text):
# return a Py_UNICODE[] constant, creating a new one if necessary
assert text.is_unicode
try:
c = self.pyunicode_ptr_const_index[text]
except KeyError:
c = self.pyunicode_ptr_const_index[text] = self.new_const_cname()
return c
    def get_py_string_const(self, text, identifier=None,
                            is_str=False, unicode_value=None):
        # return a Python string constant, creating a new one if necessary
        py3str_cstring = None
        if is_str and unicode_value is not None \
                and unicode_value.utf8encode() != text.byteencode():
            # A Py2 'str' whose unicode form encodes differently under UTF-8:
            # keep two C strings, one per major Python version.
            py3str_cstring = self.get_string_const(unicode_value, py_version=3)
            c_string = self.get_string_const(text, py_version=2)
        else:
            # One C string serves both Python versions.
            c_string = self.get_string_const(text)
        py_string = c_string.get_py_string_const(
            text.encoding, identifier, is_str, py3str_cstring)
        return py_string

    def get_interned_identifier(self, text):
        # Identifiers are always interned Python strings.
        return self.get_py_string_const(text, identifier=True)
def new_string_const(self, text, byte_string):
cname = self.new_string_const_cname(byte_string)
c = StringConst(cname, text, byte_string)
self.string_const_index[byte_string] = c
return c
def new_num_const(self, value, py_type, value_code=None):
cname = self.new_num_const_cname(value, py_type)
c = NumConst(cname, value, py_type, value_code)
self.num_const_index[(value, py_type)] = c
return c
def new_py_const(self, type, prefix=''):
cname = self.new_const_cname(prefix)
c = PyObjectConst(cname, type)
self.py_constants.append(c)
return c
def new_string_const_cname(self, bytes_value):
# Create a new globally-unique nice name for a C string constant.
value = bytes_value.decode('ASCII', 'ignore')
return self.new_const_cname(value=value)
def new_num_const_cname(self, value, py_type):
if py_type == 'long':
value += 'L'
py_type = 'int'
prefix = Naming.interned_prefixes[py_type]
cname = "%s%s" % (prefix, value)
cname = cname.replace('+', '_').replace('-', 'neg_').replace('.', '_')
return cname
    def new_const_cname(self, prefix='', value=''):
        # Create a new globally-unique C name for a constant.
        # `value` is sanitised into a short identifier fragment; a per-value
        # counter in `const_cnames_used` disambiguates repeated fragments.
        value = replace_identifier('_', value)[:32].strip('_')
        used = self.const_cnames_used
        name_suffix = value
        while name_suffix in used:
            # Suffix taken: bump the counter stored under the base value and
            # retry with "<value>_<counter>".
            counter = used[value] = used[value] + 1
            name_suffix = '%s_%d' % (value, counter)
        used[name_suffix] = 1
        if prefix:
            # A symbolic prefix selects one of the interned name prefixes.
            prefix = Naming.interned_prefixes[prefix]
        else:
            prefix = Naming.const_prefix
        return "%s%s" % (prefix, name_suffix)
def get_cached_unbound_method(self, type_cname, method_name):
key = (type_cname, method_name)
try:
cname = self.cached_cmethods[key]
except KeyError:
cname = self.cached_cmethods[key] = self.new_const_cname(
'umethod', '%s_%s' % (type_cname, method_name))
return cname
def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
# admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
cache_cname = self.get_cached_unbound_method(type_cname, method_name)
args = [obj_cname] + arg_cnames
return "__Pyx_%s(&%s, %s)" % (
utility_code_name,
cache_cname,
', '.join(args),
)
    def add_cached_builtin_decl(self, entry):
        # Declare and initialise the cached lookup slot for a builtin name.
        if entry.is_builtin and entry.is_const:
            if self.should_declare(entry.cname, entry):
                self.put_pyobject_decl(entry)
                w = self.parts['cached_builtins']
                condition = None
                if entry.name in non_portable_builtins_map:
                    # Builtin is named differently across Python versions:
                    # emit both lookups behind a preprocessor condition.
                    condition, replacement = non_portable_builtins_map[entry.name]
                    w.putln('#if %s' % condition)
                    self.put_cached_builtin_init(
                        entry.pos, StringEncoding.EncodedString(replacement),
                        entry.cname)
                    w.putln('#else')
                self.put_cached_builtin_init(
                    entry.pos, StringEncoding.EncodedString(entry.name),
                    entry.cname)
                if condition:
                    w.putln('#endif')
def put_cached_builtin_init(self, pos, name, cname):
w = self.parts['cached_builtins']
interned_cname = self.get_interned_identifier(name).cname
self.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
w.putln('%s = __Pyx_GetBuiltinName(%s); if (!%s) %s' % (
cname,
interned_cname,
cname,
w.error_goto(pos)))
    def generate_const_declarations(self):
        # Emit all constant declarations.
        # NOTE(review): the generation order below is preserved as-is; the
        # later generators may depend on constants registered by earlier ones.
        self.generate_cached_methods_decls()
        self.generate_string_constants()
        self.generate_num_constants()
        self.generate_object_constant_decls()
    def generate_object_constant_decls(self):
        # Declare all Python object constants: a slot in the module state
        # struct (with a #define alias, clear and traverse lines), plus a
        # plain static declaration outside the Limited API.
        # Sorting by (len, name) keeps the output deterministic.
        consts = [(len(c.cname), c.cname, c)
                  for c in self.py_constants]
        consts.sort()
        decls_writer = self.parts['decls']
        decls_writer.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
        for _, cname, c in consts:
            self.parts['module_state'].putln("%s;" % c.type.declaration_code(cname))
            self.parts['module_state_defines'].putln(
                "#define %s %s->%s" % (cname, Naming.modulestateglobal_cname, cname))
            self.parts['module_state_clear'].putln(
                "Py_CLEAR(clear_module_state->%s);" % cname)
            self.parts['module_state_traverse'].putln(
                "Py_VISIT(traverse_module_state->%s);" % cname)
            decls_writer.putln(
                "static %s;" % c.type.declaration_code(cname))
        decls_writer.putln("#endif")
    def generate_cached_methods_decls(self):
        # Declare and initialise the cache structs for unbound C method calls.
        if not self.cached_cmethods:
            return

        decl = self.parts['decls']
        init = self.parts['init_globals']
        cnames = []
        for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
            cnames.append(cname)
            method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
            decl.putln('static __Pyx_CachedCFunction %s = {0, 0, 0, 0, 0};' % (
                cname))
            # split type reference storage as it might not be static
            init.putln('%s.type = (PyObject*)&%s;' % (
                cname, type_cname))
            # method name string isn't static in limited api
            init.putln('%s.method_name = &%s;' % (
                cname, method_name_cname))

        if Options.generate_cleanup_code:
            # Drop the cached bound methods at module cleanup.
            cleanup = self.parts['cleanup_globals']
            for cname in cnames:
                cleanup.putln("Py_CLEAR(%s.method);" % cname)
    def generate_string_constants(self):
        # Emit all C string constants, the Python string table, and the
        # init-time code that creates the interned Python string objects.
        c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()]
        c_consts.sort()
        py_strings = []

        decls_writer = self.parts['string_decls']
        for _, cname, c in c_consts:
            conditional = False
            if c.py_versions and (2 not in c.py_versions or 3 not in c.py_versions):
                # Constant is only needed for one major Python version.
                conditional = True
                decls_writer.putln("#if PY_MAJOR_VERSION %s 3" % (
                    (2 in c.py_versions) and '<' or '>='))
            decls_writer.putln('static const char %s[] = "%s";' % (
                cname, StringEncoding.split_string_literal(c.escaped_value)))
            if conditional:
                decls_writer.putln("#endif")
            if c.py_strings is not None:
                for py_string in c.py_strings.values():
                    py_strings.append((c.cname, len(py_string.cname), py_string))

        for c, cname in sorted(self.pyunicode_ptr_const_index.items()):
            utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c)
            if utf16_array:
                # Narrow and wide representations differ
                decls_writer.putln("#ifdef Py_UNICODE_WIDE")
            decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf32_array))
            if utf16_array:
                decls_writer.putln("#else")
                decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf16_array))
                decls_writer.putln("#endif")

        init_globals = self.parts['init_globals']
        if py_strings:
            self.use_utility_code(UtilityCode.load_cached("InitStrings", "StringTools.c"))
            py_strings.sort()
            w = self.parts['pystring_table']
            w.putln("")
            w.putln("static __Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname)
            # The table entries differ under the Limited API, so keep two
            # insertion points behind a preprocessor switch.
            w.putln("#if CYTHON_COMPILING_IN_LIMITED_API")
            w_limited_writer = w.insertion_point()
            w.putln("#else")
            w_not_limited_writer = w.insertion_point()
            w.putln("#endif")
            decls_writer.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
            not_limited_api_decls_writer = decls_writer.insertion_point()
            decls_writer.putln("#endif")
            init_globals.putln("#if CYTHON_COMPILING_IN_LIMITED_API")
            init_globals_limited_api = init_globals.insertion_point()
            init_globals.putln("#endif")
            for idx, py_string_args in enumerate(py_strings):
                c_cname, _, py_string = py_string_args
                if not py_string.is_str or not py_string.encoding or \
                        py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII',
                                               'UTF8', 'UTF-8'):
                    # ASCII/UTF-8 compatible data needs no explicit encoding.
                    encoding = '0'
                else:
                    encoding = '"%s"' % py_string.encoding.lower()

                # Every Python string object lives in the module state struct.
                self.parts['module_state'].putln("PyObject *%s;" % py_string.cname)
                self.parts['module_state_defines'].putln("#define %s %s->%s" % (
                    py_string.cname,
                    Naming.modulestateglobal_cname,
                    py_string.cname))
                self.parts['module_state_clear'].putln("Py_CLEAR(clear_module_state->%s);" %
                                                       py_string.cname)
                self.parts['module_state_traverse'].putln("Py_VISIT(traverse_module_state->%s);" %
                                                          py_string.cname)
                not_limited_api_decls_writer.putln(
                    "static PyObject *%s;" % py_string.cname)
                if py_string.py3str_cstring:
                    # Use the dedicated UTF-8 C string under Python 3.
                    w_not_limited_writer.putln("#if PY_MAJOR_VERSION >= 3")
                    w_not_limited_writer.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
                        py_string.cname,
                        py_string.py3str_cstring.cname,
                        py_string.py3str_cstring.cname,
                        '0', 1, 0,
                        py_string.intern
                        ))
                    w_not_limited_writer.putln("#else")
                w_not_limited_writer.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
                    py_string.cname,
                    c_cname,
                    c_cname,
                    encoding,
                    py_string.is_unicode,
                    py_string.is_str,
                    py_string.intern
                    ))
                if py_string.py3str_cstring:
                    w_not_limited_writer.putln("#endif")
                w_limited_writer.putln("{0, %s, sizeof(%s), %s, %d, %d, %d}," % (
                    c_cname if not py_string.py3str_cstring else py_string.py3str_cstring.cname,
                    c_cname if not py_string.py3str_cstring else py_string.py3str_cstring.cname,
                    encoding if not py_string.py3str_cstring else '0',
                    py_string.is_unicode,
                    py_string.is_str,
                    py_string.intern
                    ))
                # Limited API: strings are created one by one at init time.
                init_globals_limited_api.putln("if (__Pyx_InitString(%s[%d], &%s) < 0) %s;" % (
                    Naming.stringtab_cname,
                    idx,
                    py_string.cname,
                    init_globals.error_goto(self.module_pos)))
            w.putln("{0, 0, 0, 0, 0, 0, 0}")
            w.putln("};")
            init_globals.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
            init_globals.putln(
                "if (__Pyx_InitStrings(%s) < 0) %s;" % (
                    Naming.stringtab_cname,
                    init_globals.error_goto(self.module_pos)))
            init_globals.putln("#endif")
    def generate_num_constants(self):
        # Declare all numeric constants and emit the init-time code that
        # builds the Python objects for them.  The sort key keeps output
        # deterministic (type, sign, magnitude, literal).
        consts = [(c.py_type, c.value[0] == '-', len(c.value), c.value, c.value_code, c)
                  for c in self.num_const_index.values()]
        consts.sort()
        decls_writer = self.parts['decls']
        decls_writer.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
        init_globals = self.parts['init_globals']
        for py_type, _, _, value, value_code, c in consts:
            cname = c.cname
            self.parts['module_state'].putln("PyObject *%s;" % cname)
            self.parts['module_state_defines'].putln("#define %s %s->%s" % (
                cname, Naming.modulestateglobal_cname, cname))
            self.parts['module_state_clear'].putln(
                "Py_CLEAR(clear_module_state->%s);" % cname)
            self.parts['module_state_traverse'].putln(
                "Py_VISIT(traverse_module_state->%s);" % cname)
            decls_writer.putln("static PyObject *%s;" % cname)
            if py_type == 'float':
                function = 'PyFloat_FromDouble(%s)'
            elif py_type == 'long':
                function = 'PyLong_FromString((char *)"%s", 0, 0)'
            elif Utils.long_literal(value):
                # Value too large for a C long literal.
                function = 'PyInt_FromString((char *)"%s", 0, 0)'
            elif len(value.lstrip('-')) > 4:
                function = "PyInt_FromLong(%sL)"
            else:
                function = "PyInt_FromLong(%s)"
            init_globals.putln('%s = %s; %s' % (
                cname, function % value_code,
                init_globals.error_goto_if_null(cname, self.module_pos)))
        decls_writer.putln("#endif")
# The functions below are there in a transition phase only
# and will be deprecated. They are called from Nodes.BlockNode.
# The copy&paste duplication is intentional in order to be able
# to see quickly how BlockNode worked, until this is replaced.
def should_declare(self, cname, entry):
if cname in self.declared_cnames:
other = self.declared_cnames[cname]
assert str(entry.type) == str(other.type)
assert entry.init == other.init
return False
else:
self.declared_cnames[cname] = entry
return True
#
# File name state
#
def lookup_filename(self, source_desc):
entry = source_desc.get_filenametable_entry()
try:
index = self.filename_table[entry]
except KeyError:
index = len(self.filename_list)
self.filename_list.append(source_desc)
self.filename_table[entry] = index
return index
    def commented_file_contents(self, source_desc):
        # Return the source file's lines formatted for embedding inside a
        # C block comment, cached per source descriptor.
        try:
            return self.input_file_contents[source_desc]
        except KeyError:
            pass

        source_file = source_desc.get_lines(encoding='ASCII',
                                            error_handling='ignore')
        try:
            # Escape "*/" and "/*" so the embedded source can never terminate
            # (or restart) the surrounding C comment.
            F = [u' * ' + line.rstrip().replace(
                    u'*/', u'*[inserted by cython to avoid comment closer]/'
                ).replace(
                    u'/*', u'/[inserted by cython to avoid comment start]*'
                )
                 for line in source_file]
        finally:
            if hasattr(source_file, 'close'):
                source_file.close()
        if not F: F.append(u'')
        self.input_file_contents[source_desc] = F
        return F
    #
    # Utility code state
    #

    def use_utility_code(self, utility_code):
        """
        Adds code to the C file. utility_code should
        a) implement __eq__/__hash__ for the purpose of knowing whether the same
           code has already been included
        b) implement put_code, which takes a globalstate instance

        See UtilityCode.
        """
        # Each distinct utility code snippet is emitted at most once.
        if utility_code and utility_code not in self.utility_codes:
            self.utility_codes.add(utility_code)
            utility_code.put_code(self)
def use_entry_utility_code(self, entry):
if entry is None:
return
if entry.utility_code:
self.use_utility_code(entry.utility_code)
if entry.utility_code_definition:
self.use_utility_code(entry.utility_code_definition)
def funccontext_property(func):
    """Property that proxies the same-named attribute of ``self.funcstate``.

    Only the decorated function's name is used; its body is ignored.
    """
    name = func.__name__

    def _get(self):
        return getattr(self.funcstate, name)

    def _set(self, value):
        setattr(self.funcstate, name, value)

    return property(_get, _set)
class CCodeConfig(object):
    """Configuration flags for the C code writer.

    emit_linenums        write "#line" pragmas into the generated C code?
    emit_code_comments   copy the original source into C comments?
    c_line_in_traceback  append the C file/line to exception tracebacks?
    """

    def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
        self.emit_linenums = emit_linenums
        self.emit_code_comments = emit_code_comments
        self.c_line_in_traceback = c_line_in_traceback
class CCodeWriter(object):
"""
Utility class to output C code.
When creating an insertion point one must care about the state that is
kept:
- formatting state (level, bol) is cloned and used in insertion points
as well
- labels, temps, exc_vars: One must construct a scope in which these can
exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
sanity checking and forward compatibility). Created insertion points
looses this scope and cannot access it.
- marker: Not copied to insertion point
- filename_table, filename_list, input_file_contents: All codewriters
coming from the same root share the same instances simultaneously.
"""
# f file output file
# buffer StringIOTree
# level int indentation level
# bol bool beginning of line?
# marker string comment to emit before next line
# funcstate FunctionState contains state local to a C function used for code
# generation (labels and temps state etc.)
# globalstate GlobalState contains state global for a C file (input file info,
# utility code, declared constants etc.)
# pyclass_stack list used during recursive code generation to pass information
# about the current class one is in
# code_config CCodeConfig configuration options for the C code writer
    @cython.locals(create_from='CCodeWriter')
    def __init__(self, create_from=None, buffer=None, copy_formatting=False):
        # `create_from`: parent writer whose global/function state is shared.
        # `buffer`: optional StringIOTree to write into.
        # `copy_formatting`: also clone indentation/position state.
        if buffer is None: buffer = StringIOTree()
        self.buffer = buffer
        self.last_pos = None
        self.last_marked_pos = None
        self.pyclass_stack = []

        self.funcstate = None
        self.globalstate = None
        self.code_config = None
        self.level = 0
        self.call_level = 0
        self.bol = 1

        if create_from is not None:
            # Use same global state
            self.set_global_state(create_from.globalstate)
            self.funcstate = create_from.funcstate
            # Clone formatting state
            if copy_formatting:
                self.level = create_from.level
                self.bol = create_from.bol
                self.call_level = create_from.call_level
                self.last_pos = create_from.last_pos
                self.last_marked_pos = create_from.last_marked_pos
    def create_new(self, create_from, buffer, copy_formatting):
        # polymorphic constructor -- very slightly more versatile
        # than using __class__
        result = CCodeWriter(create_from, buffer, copy_formatting)
        return result

    def set_global_state(self, global_state):
        # Attach the shared per-C-file state; may only happen once.
        assert self.globalstate is None  # prevent overwriting once it's set
        self.globalstate = global_state
        self.code_config = global_state.code_config

    def copyto(self, f):
        # Write the accumulated output to the open file `f`.
        self.buffer.copyto(f)

    def getvalue(self):
        # Return the accumulated output as a single string.
        return self.buffer.getvalue()

    def write(self, s):
        # Raw write; records one source-line marker per emitted newline.
        # also put invalid markers (lineno 0), to indicate that those lines
        # have no Cython source code correspondence
        cython_lineno = self.last_marked_pos[1] if self.last_marked_pos else 0
        self.buffer.markers.extend([cython_lineno] * s.count('\n'))
        self.buffer.write(s)

    def insertion_point(self):
        # Return a new writer positioned at the current output location;
        # text written to it later appears here, before subsequent output.
        other = self.create_new(create_from=self, buffer=self.buffer.insertion_point(), copy_formatting=True)
        return other

    def new_writer(self):
        """
        Creates a new CCodeWriter connected to the same global state, which
        can later be inserted using insert.
        """
        return CCodeWriter(create_from=self)

    def insert(self, writer):
        """
        Inserts the contents of another code writer (created with
        the same global state) in the current location.

        It is ok to write to the inserted writer also after insertion.
        """
        assert writer.globalstate is self.globalstate
        self.buffer.insert(writer.buffer)
    # Properties delegated to function scope
    # (each reads/writes the same-named attribute on self.funcstate)
    @funccontext_property
    def label_counter(self): pass
    @funccontext_property
    def return_label(self): pass
    @funccontext_property
    def error_label(self): pass
    @funccontext_property
    def labels_used(self): pass
    @funccontext_property
    def continue_label(self): pass
    @funccontext_property
    def break_label(self): pass
    @funccontext_property
    def return_from_error_cleanup_label(self): pass
    @funccontext_property
    def yield_labels(self): pass

    # Functions delegated to function scope (thin forwarders to funcstate)
    def new_label(self, name=None): return self.funcstate.new_label(name)
    def new_error_label(self): return self.funcstate.new_error_label()
    def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args)
    def get_loop_labels(self): return self.funcstate.get_loop_labels()
    def set_loop_labels(self, labels): return self.funcstate.set_loop_labels(labels)
    def new_loop_labels(self): return self.funcstate.new_loop_labels()
    def get_all_labels(self): return self.funcstate.get_all_labels()
    def set_all_labels(self, labels): return self.funcstate.set_all_labels(labels)
    def all_new_labels(self): return self.funcstate.all_new_labels()
    def use_label(self, lbl): return self.funcstate.use_label(lbl)
    def label_used(self, lbl): return self.funcstate.label_used(lbl)

    def enter_cfunc_scope(self, scope=None):
        # Begin generating code for a C function: fresh label/temp state.
        self.funcstate = FunctionState(self, scope=scope)

    def exit_cfunc_scope(self):
        # Sanity-check and drop the per-function state.
        self.funcstate.validate_exit()
        self.funcstate = None

    # constant handling
    # (thin forwarders that return the cname of the global constant)

    def get_py_int(self, str_value, longness):
        return self.globalstate.get_int_const(str_value, longness).cname

    def get_py_float(self, str_value, value_code):
        return self.globalstate.get_float_const(str_value, value_code).cname

    def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
        return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname

    def get_string_const(self, text):
        return self.globalstate.get_string_const(text).cname

    def get_pyunicode_ptr_const(self, text):
        return self.globalstate.get_pyunicode_ptr_const(text)

    def get_py_string_const(self, text, identifier=None,
                            is_str=False, unicode_value=None):
        return self.globalstate.get_py_string_const(
            text, identifier, is_str, unicode_value).cname

    def get_argument_default_const(self, type):
        return self.globalstate.get_py_const(type).cname

    def intern(self, text):
        return self.get_py_string_const(text)

    def intern_identifier(self, text):
        return self.get_py_string_const(text, identifier=True)

    def get_cached_constants_writer(self, target=None):
        return self.globalstate.get_cached_constants_writer(target)
    # code generation

    def putln(self, code="", safe=False):
        # Emit `code` as a full line, flushing any pending source marker and
        # (optionally) a #line pragma first.
        if self.last_pos and self.bol:
            self.emit_marker()
        if self.code_config.emit_linenums and self.last_marked_pos:
            source_desc, line, _ = self.last_marked_pos
            self.write('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description()))
        if code:
            if safe:
                # `safe` bypasses the brace-counting indentation logic.
                self.put_safe(code)
            else:
                self.put(code)
        self.write("\n")
        self.bol = 1
def mark_pos(self, pos, trace=True):
if pos is None:
return
if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]:
return
self.last_pos = (pos, trace)
    def emit_marker(self):
        # Flush the pending source position as a comment block and, when
        # line tracing is enabled, a __Pyx_TraceLine() call.
        pos, trace = self.last_pos
        self.last_marked_pos = pos
        self.last_pos = None
        self.write("\n")
        if self.code_config.emit_code_comments:
            self.indent()
            self.write("/* %s */\n" % self._build_marker(pos))
        if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']:
            self.indent()
            self.write('__Pyx_TraceLine(%d,%d,%s)\n' % (
                pos[1], not self.funcstate.gil_owned, self.error_goto(pos)))

    def _build_marker(self, pos):
        # Build the comment text: "file":line plus a few surrounding source
        # lines, with the current line flagged.
        source_desc, line, col = pos
        assert isinstance(source_desc, SourceDescriptor)
        contents = self.globalstate.commented_file_contents(source_desc)
        lines = contents[max(0, line-3):line]  # line numbers start at 1
        lines[-1] += u'             # <<<<<<<<<<<<<<'
        lines += contents[line:line+2]
        return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines))
    def put_safe(self, code):
        # put code, but ignore {}
        self.write(code)
        self.bol = 0

    def put_or_include(self, code, name):
        # Emit `code` inline, or spill large snippets into a shared include
        # file (written atomically via a temp file) when a common utility
        # include directory is configured.
        include_dir = self.globalstate.common_utility_include_dir
        if include_dir and len(code) > 1024:
            include_file = "%s_%s.h" % (
                name, hashlib.sha1(code.encode('utf8')).hexdigest())
            path = os.path.join(include_dir, include_file)
            if not os.path.exists(path):
                tmp_path = '%s.tmp%s' % (path, os.getpid())
                with closing(Utils.open_new_file(tmp_path)) as f:
                    f.write(code)
                shutil.move(tmp_path, path)
            code = '#include "%s"\n' % path
        self.put(code)
def put(self, code):
fix_indent = False
if "{" in code:
dl = code.count("{")
else:
dl = 0
if "}" in code:
dl -= code.count("}")
if dl < 0:
self.level += dl
elif dl == 0 and code[0] == "}":
# special cases like "} else {" need a temporary dedent
fix_indent = True
self.level -= 1
if self.bol:
self.indent()
self.write(code)
self.bol = 0
if dl > 0:
self.level += dl
elif fix_indent:
self.level += 1
    def putln_tempita(self, code, **context):
        # putln() after expanding `code` as a Tempita template.
        from ..Tempita import sub
        self.putln(sub(code, **context))

    def put_tempita(self, code, **context):
        # put() after expanding `code` as a Tempita template.
        from ..Tempita import sub
        self.put(sub(code, **context))

    def increase_indent(self):
        self.level += 1

    def decrease_indent(self):
        self.level -= 1

    def begin_block(self):
        # Open a C block and indent.
        self.putln("{")
        self.increase_indent()

    def end_block(self):
        # Dedent and close a C block.
        self.decrease_indent()
        self.putln("}")

    def indent(self):
        self.write("    " * self.level)

    def get_py_version_hex(self, pyversion):
        # Format a (major, minor[, micro, release]) tuple as PY_VERSION_HEX.
        return "0x%02X%02X%02X%02X" % (tuple(pyversion) + (0,0,0,0))[:4]

    def put_label(self, lbl):
        # Emit a C label, but only if something jumps to it.
        if lbl in self.funcstate.labels_used:
            self.putln("%s:;" % lbl)

    def put_goto(self, lbl):
        # Emit a goto and record the label as used.
        self.funcstate.use_label(lbl)
        self.putln("goto %s;" % lbl)
    def put_var_declaration(self, entry, storage_class="",
                            dll_linkage=None, definition=True):
        # Declare a single variable entry, skipping unused/undefined private
        # ones; PyObject-like variables are NULL-initialised.
        #print "Code.put_var_declaration:", entry.name, "definition =", definition ###
        if entry.visibility == 'private' and not (definition or entry.defined_in_pxd):
            #print "...private and not definition, skipping", entry.cname ###
            return
        if entry.visibility == "private" and not entry.used:
            #print "...private and not used, skipping", entry.cname ###
            return
        if storage_class:
            self.put("%s " % storage_class)
        if not entry.cf_used:
            # Silence unused-variable warnings for control-flow-dead entries.
            self.put('CYTHON_UNUSED ')
        self.put(entry.type.declaration_code(
            entry.cname, dll_linkage=dll_linkage))
        if entry.init is not None:
            self.put_safe(" = %s" % entry.type.literal_code(entry.init))
        elif entry.type.is_pyobject or entry.type.is_cyp_class:
            self.put(" = NULL")
        self.putln(";")

    def put_temp_declarations(self, func_context):
        # Declare all temporaries allocated for the current function,
        # plus the error-position bookkeeping variables if needed.
        for name, type, manage_ref, static in func_context.temps_allocated:
            decl = type.declaration_code(name)
            if type.is_pyobject:
                self.putln("%s = NULL;" % decl)
            elif type.is_memoryviewslice:
                self.putln("%s = %s;" % (decl, type.literal_code(type.default_value)))
            elif type.is_cyp_class:
                self.putln("%s = NULL;" % decl)
            else:
                self.putln("%s%s;" % (static and "static " or "", decl))

        if func_context.should_declare_error_indicator:
            if self.funcstate.uses_error_indicator:
                unused = ''
            else:
                unused = 'CYTHON_UNUSED '
            # Initialize these variables to silence compiler warnings
            self.putln("%sint %s = 0;" % (unused, Naming.lineno_cname))
            self.putln("%sconst char *%s = NULL;" % (unused, Naming.filename_cname))
            self.putln("%sint %s = 0;" % (unused, Naming.clineno_cname))
    def put_generated_by(self):
        # Emit the "Generated by Cython" watermark comment.
        self.putln("/* Generated by Cython %s */" % Version.watermark)
        self.putln("")

    def put_h_guard(self, guard):
        # Open a header include guard (caller emits the matching #endif).
        self.putln("#ifndef %s" % guard)
        self.putln("#define %s" % guard)

    def unlikely(self, cond):
        # Wrap a condition in the branch-prediction hint, if enabled.
        if Options.gcc_branch_hints:
            return 'unlikely(%s)' % cond
        else:
            return cond

    def build_function_modifiers(self, modifiers, mapper=modifier_output_mapper):
        # Map Cython function modifiers to their C spellings.
        if not modifiers:
            return ''
        return '%s ' % ' '.join([mapper(m,m) for m in modifiers])

    # Reference counting

    def entry_as_pyobject(self, entry):
        # Cast the entry's cname to PyObject* where its C type requires it.
        type = entry.type
        if (not entry.is_self_arg and not entry.type.is_complete()
                or entry.type.is_extension_type):
            return "(PyObject *)" + entry.cname
        else:
            return entry.cname

    def as_pyobject(self, cname, type):
        from .PyrexTypes import py_object_type, typecast
        return typecast(py_object_type, type, cname)

    # The put_*ref helpers below all delegate the actual code generation to
    # the value's type object; the x-variants accept NULL values.

    def put_gotref(self, cname, type):
        type.generate_gotref(self, cname)

    def put_giveref(self, cname, type):
        type.generate_giveref(self, cname)

    def put_xgiveref(self, cname, type):
        type.generate_xgiveref(self, cname)

    def put_xgotref(self, cname, type):
        type.generate_xgotref(self, cname)

    def put_incref(self, cname, type, nanny=True):
        # Note: original put_Memslice_Incref/Decref also added in some utility code
        # this is unnecessary since the relevant utility code is loaded anyway if a memoryview is used
        # and so has been removed. However, it's potentially a feature that might be useful here
        type.generate_incref(self, cname, nanny=nanny)

    def put_xincref(self, cname, type, nanny=True):
        type.generate_xincref(self, cname, nanny=nanny)

    def put_decref(self, cname, type, nanny=True, have_gil=True):
        type.generate_decref(self, cname, nanny=nanny, have_gil=have_gil)

    def put_xdecref(self, cname, type, nanny=True, have_gil=True):
        type.generate_xdecref(self, cname, nanny=nanny, have_gil=have_gil)

    def put_decref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True):
        type.generate_decref_clear(self, cname, clear_before_decref=clear_before_decref,
                                   nanny=nanny, have_gil=have_gil)

    def put_xdecref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True):
        type.generate_xdecref_clear(self, cname, clear_before_decref=clear_before_decref,
                                    nanny=nanny, have_gil=have_gil)

    def put_decref_set(self, cname, type, rhs_cname):
        type.generate_decref_set(self, cname, rhs_cname)

    def put_xdecref_set(self, cname, type, rhs_cname):
        type.generate_xdecref_set(self, cname, rhs_cname)

    def put_incref_memoryviewslice(self, slice_cname, type, have_gil):
        # TODO ideally this would just be merged into "put_incref"
        type.generate_incref_memoryviewslice(self, slice_cname, have_gil=have_gil)

    def put_var_incref_memoryviewslice(self, entry, have_gil):
        self.put_incref_memoryviewslice(entry.cname, entry.type, have_gil=have_gil)

    # The put_var_* helpers apply the corresponding operation to a symbol
    # table entry's cname/type pair.

    def put_var_gotref(self, entry):
        self.put_gotref(entry.cname, entry.type)

    def put_var_giveref(self, entry):
        self.put_giveref(entry.cname, entry.type)

    def put_var_xgotref(self, entry):
        self.put_xgotref(entry.cname, entry.type)

    def put_var_xgiveref(self, entry):
        self.put_xgiveref(entry.cname, entry.type)

    def put_var_incref(self, entry, **kwds):
        self.put_incref(entry.cname, entry.type, **kwds)

    def put_var_xincref(self, entry, **kwds):
        self.put_xincref(entry.cname, entry.type, **kwds)

    def put_var_decref(self, entry, **kwds):
        self.put_decref(entry.cname, entry.type, **kwds)

    def put_var_xdecref(self, entry, **kwds):
        self.put_xdecref(entry.cname, entry.type, **kwds)

    def put_var_decref_clear(self, entry, **kwds):
        self.put_decref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds)

    def put_var_decref_set(self, entry, rhs_cname, **kwds):
        self.put_decref_set(entry.cname, entry.type, rhs_cname, **kwds)

    def put_var_xdecref_set(self, entry, rhs_cname, **kwds):
        self.put_xdecref_set(entry.cname, entry.type, rhs_cname, **kwds)

    def put_var_xdecref_clear(self, entry, **kwds):
        self.put_xdecref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds)

    def put_var_decrefs(self, entries, used_only = 0):
        for entry in entries:
            if not used_only or entry.used:
                if entry.xdecref_cleanup:
                    self.put_var_xdecref(entry)
                else:
                    self.put_var_decref(entry)

    def put_var_xdecrefs(self, entries):
        for entry in entries:
            self.put_var_xdecref(entry)

    def put_var_xdecrefs_clear(self, entries):
        for entry in entries:
            self.put_var_xdecref_clear(entry)
    def put_init_to_py_none(self, cname, type, nanny=True):
        # Assign Py_None (cast to `type`) and take a reference, using the
        # refnanny macro when `nanny` is set.
        from .PyrexTypes import py_object_type, typecast
        py_none = typecast(type, py_object_type, "Py_None")
        if nanny:
            self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none))
        else:
            self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none))
def put_init_var_to_py_none(self, entry, template = "%s", nanny=True):
code = template % entry.cname
#if entry.type.is_extension_type:
# code = "((PyObject*)%s)" % code
self.put_init_to_py_none(code, entry.type, nanny)
if entry.in_closure:
self.put_giveref('Py_None')
    def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None):
        # Emit one PyMethodDef table entry for `entry`, followed by `term`.
        if entry.is_special or entry.name == '__getattribute__':
            if entry.name not in special_py_methods:
                if entry.name == '__getattr__' and not self.globalstate.directives['fast_getattr']:
                    pass
                # Python's typeobject.c will automatically fill in our slot
                # in add_operators() (called by PyType_Ready) with a value
                # that's better than ours.
                elif allow_skip:
                    return

        method_flags = entry.signature.method_flags()
        if not method_flags:
            return
        if entry.is_special:
            from . import TypeSlots
            method_flags += [TypeSlots.method_coexist]
        # Use a generated wrapper function if one is requested.
        func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname
        # Add required casts, but try not to shadow real warnings.
        cast = entry.signature.method_function_type()
        if cast != 'PyCFunction':
            func_ptr = '(void*)(%s)%s' % (cast, func_ptr)
        entry_name = entry.name.as_c_string_literal()
        self.putln(
            '{%s, (PyCFunction)%s, %s, %s}%s' % (
                entry_name,
                func_ptr,
                "|".join(method_flags),
                entry.doc_cname if entry.doc else '0',
                term))

    def put_pymethoddef_wrapper(self, entry):
        # Return the function cname to put into the PyMethodDef table,
        # generating an adapter where the C signature does not match.
        func_cname = entry.func_cname
        if entry.is_special:
            method_flags = entry.signature.method_flags() or []
            from .TypeSlots import method_noargs
            if method_noargs in method_flags:
                # Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one.
                func_cname = Naming.method_wrapper_prefix + func_cname
                self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {return %s(self);}" % (
                    func_cname, entry.func_cname))
        return func_cname
    # GIL methods

    def use_fast_gil_utility_code(self):
        # Select the fast-GIL or portable GIL helper utility code.
        if self.globalstate.directives['fast_gil']:
            self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
        else:
            self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))

    def put_ensure_gil(self, declare_gilstate=True, variable=None):
        """
        Acquire the GIL. The generated code is safe even when no PyThreadState
        has been allocated for this thread (for threads not initialized by
        using the Python API). Additionally, the code generated by this method
        may be called recursively.
        """
        self.globalstate.use_utility_code(
            UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
        self.use_fast_gil_utility_code()
        self.putln("#ifdef WITH_THREAD")
        if not variable:
            variable = '__pyx_gilstate_save'
        if declare_gilstate:
            self.put("PyGILState_STATE ")
        self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
        self.putln("#endif")

    def put_release_ensured_gil(self, variable=None):
        """
        Releases the GIL, corresponds to `put_ensure_gil`.
        """
        self.use_fast_gil_utility_code()
        if not variable:
            variable = '__pyx_gilstate_save'
        self.putln("#ifdef WITH_THREAD")
        self.putln("__Pyx_PyGILState_Release(%s);" % variable)
        self.putln("#endif")

    def put_acquire_gil(self, variable=None):
        """
        Acquire the GIL. The thread's thread state must have been initialized
        by a previous `put_release_gil`
        """
        self.use_fast_gil_utility_code()
        self.putln("#ifdef WITH_THREAD")
        self.putln("__Pyx_FastGIL_Forget();")
        if variable:
            self.putln('_save = %s;' % variable)
        self.putln("Py_BLOCK_THREADS")
        self.putln("#endif")

    def put_release_gil(self, variable=None):
        "Release the GIL, corresponds to `put_acquire_gil`."
        self.use_fast_gil_utility_code()
        self.putln("#ifdef WITH_THREAD")
        self.putln("PyThreadState *_save;")
        self.putln("Py_UNBLOCK_THREADS")
        if variable:
            self.putln('%s = _save;' % variable)
        self.putln("__Pyx_FastGIL_Remember();")
        self.putln("#endif")

    def declare_gilstate(self):
        # Declare the gilstate variable used by put_ensure_gil().
        self.putln("#ifdef WITH_THREAD")
        self.putln("PyGILState_STATE __pyx_gilstate_save;")
        self.putln("#endif")
# error handling

def put_error_if_neg(self, pos, value):
    # Emit a C check that jumps to the error label when *value* is negative.
    # TODO this path is almost _never_ taken, yet this macro makes it slower!
    # return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos)))
    return self.putln("if (%s < 0) %s" % (value, self.error_goto(pos)))
def put_error_if_unbound(self, pos, entry, in_nogil_context=False):
    """Emit a C check that raises the appropriate Python error and jumps to
    the error label when the variable described by *entry* is unbound."""
    from . import ExprNodes
    if entry.from_closure:
        # Closure variables raise a closure-specific NameError.
        func = '__Pyx_RaiseClosureNameError'
        self.globalstate.use_utility_code(
            ExprNodes.raise_closure_name_error_utility_code)
    elif entry.type.is_memoryviewslice and in_nogil_context:
        # Raising from nogil code needs a GIL-acquiring helper.
        func = '__Pyx_RaiseUnboundMemoryviewSliceNogil'
        self.globalstate.use_utility_code(
            ExprNodes.raise_unbound_memoryview_utility_code_nogil)
    else:
        func = '__Pyx_RaiseUnboundLocalError'
        self.globalstate.use_utility_code(
            ExprNodes.raise_unbound_local_error_utility_code)
    self.putln('if (unlikely(!%s)) { %s("%s"); %s }' % (
        entry.type.check_for_null_code(entry.cname),
        func,
        entry.name,
        self.error_goto(pos)))
def set_error_info(self, pos, used=False):
    """Record that this function needs the error-position indicator
    variables and return the C macro call marking position *pos*.

    used -- also flag that the indicator variables are read, not just set.
    """
    funcstate = self.funcstate
    funcstate.should_declare_error_indicator = True
    if used:
        funcstate.uses_error_indicator = True
    file_index = self.lookup_filename(pos[0])
    return "__PYX_MARK_ERR_POS(%s, %s)" % (file_index, pos[1])
def error_goto(self, pos, used=True):
    """Return the C code that jumps to this function's error label,
    recording the error position *pos* unless it is None."""
    funcstate = self.funcstate
    label = funcstate.error_label
    funcstate.use_label(label)
    if pos is None:
        # No position to record: a bare goto suffices.
        return 'goto %s;' % label
    funcstate.should_declare_error_indicator = True
    if used:
        funcstate.uses_error_indicator = True
    file_index = self.lookup_filename(pos[0])
    return "__PYX_ERR(%s, %s, %s)" % (file_index, pos[1], label)
def error_goto_if(self, cond, pos):
    """Return C code that jumps to the error label when *cond* is true."""
    guarded = self.unlikely(cond)
    return "if (%s) %s" % (guarded, self.error_goto(pos))

def error_goto_if_null(self, cname, pos):
    """Error-jump when the pointer *cname* is NULL."""
    return self.error_goto_if("!%s" % cname, pos)

def error_goto_if_neg(self, cname, pos):
    """Error-jump when *cname* is negative (C error-return convention)."""
    return self.error_goto_if("%s < 0" % cname, pos)

def error_goto_if_PyErr(self, pos):
    """Error-jump when a Python exception is currently set."""
    return self.error_goto_if("PyErr_Occurred()", pos)
def lookup_filename(self, filename):
    # Delegate to the global state's filename table; returns an index.
    return self.globalstate.lookup_filename(filename)

def put_declare_refcount_context(self):
    # Emit the declarations used by the RefNanny refcount debugger.
    self.putln('__Pyx_RefNannyDeclarations')
def put_setup_refcount_context(self, name, acquire_gil=False):
    """Emit the RefNanny setup call at the start of a function.

    name        -- function name (an EncodedString-like object); converted
                   to a C string literal to handle unicode names.
    acquire_gil -- when true, the setup call also acquires the GIL, which
                   requires the thread-initialization utility code.
    """
    name = name.as_c_string_literal()  # handle unicode names
    if acquire_gil:
        self.globalstate.use_utility_code(
            UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
    # '1 if ... else 0' replaces the legacy 'and 1 or 0' idiom (same result).
    self.putln('__Pyx_RefNannySetupContext(%s, %d);' % (name, 1 if acquire_gil else 0))
def put_finish_refcount_context(self, nogil=False):
    # Emit the matching teardown for put_setup_refcount_context().
    # NOTE(review): the nogil variant is emitted without a trailing ';' --
    # presumably __Pyx_RefNannyFinishContextNogil() expands to a braced
    # block in ModuleSetupCode.c; confirm before normalizing it.
    self.putln("__Pyx_RefNannyFinishContextNogil()" if nogil else "__Pyx_RefNannyFinishContext();")
def put_add_traceback(self, qualified_name, include_cline=True):
    """
    Build a Python traceback for propagating exceptions.

    qualified_name should be the qualified name of the function.
    include_cline -- pass the C line number variable; 0 disables it.
    """
    qualified_name = qualified_name.as_c_string_literal()  # handle unicode names
    format_tuple = (
        qualified_name,
        Naming.clineno_cname if include_cline else 0,
        Naming.lineno_cname,
        Naming.filename_cname,
    )

    # __Pyx_AddTraceback() reads the error indicator variables.
    self.funcstate.uses_error_indicator = True
    self.putln('__Pyx_AddTraceback(%s, %s, %s, %s);' % format_tuple)
def put_unraisable(self, qualified_name, nogil=False):
    """
    Generate code to print a Python warning for an unraisable exception.

    qualified_name should be the qualified name of the function.
    nogil -- generate the GIL-acquiring variant of the helper call.
    """
    format_tuple = (
        qualified_name,
        Naming.clineno_cname,
        Naming.lineno_cname,
        Naming.filename_cname,
        self.globalstate.directives['unraisable_tracebacks'],
        nogil,
    )
    # The helper reads the error indicator variables.
    self.funcstate.uses_error_indicator = True
    self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple)
    self.globalstate.use_utility_code(
        UtilityCode.load_cached("WriteUnraisableException", "Exceptions.c"))
def put_trace_declarations(self):
    # Emit the variable declarations needed by the profiling/tracing macros.
    self.putln('__Pyx_TraceDeclarations')

def put_trace_frame_init(self, codeobj=None):
    # Bind the trace frame to a pre-built code object, when one exists.
    if codeobj:
        self.putln('__Pyx_TraceFrameInit(%s)' % codeobj)

def put_trace_call(self, name, pos, nogil=False):
    # Emit the 'function entered' trace event; may jump to the error label.
    self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % (
        name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos)))

def put_trace_exception(self):
    # Emit the 'exception raised' trace event.
    self.putln("__Pyx_TraceException();")

def put_trace_return(self, retvalue_cname, nogil=False):
    # Emit the 'function returned' trace event for the given return value.
    self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil))

def putln_openmp(self, string):
    # Emit *string* guarded by '#ifdef _OPENMP'.
    self.putln("#ifdef _OPENMP")
    self.putln(string)
    self.putln("#endif /* _OPENMP */")
def undef_builtin_expect(self, cond):
    """
    Redefine the macros likely() and unlikely() to no-ops, depending on
    condition 'cond'
    """
    self.putln("#if %s" % cond)
    self.putln("    #undef likely")
    self.putln("    #undef unlikely")
    self.putln("    #define likely(x)   (x)")
    self.putln("    #define unlikely(x) (x)")
    self.putln("#endif")
def redef_builtin_expect(self, cond):
    # Re-enable the __builtin_expect() based likely()/unlikely() macros
    # under condition 'cond'; counterpart of undef_builtin_expect().
    self.putln("#if %s" % cond)
    self.putln("    #undef likely")
    self.putln("    #undef unlikely")
    self.putln("    #define likely(x)   __builtin_expect(!!(x), 1)")
    self.putln("    #define unlikely(x) __builtin_expect(!!(x), 0)")
    self.putln("#endif")
class PyrexCodeWriter(object):
# f file output file
# level int indentation level
def __init__(self, outfile_name):
self.f = Utils.open_new_file(outfile_name)
self.level = 0
def putln(self, code):
self.f.write("%s%s\n" % (" " * self.level, code))
def indent(self):
self.level += 1
def dedent(self):
self.level -= 1
class PyxCodeWriter(object):
    """
    Can be used for writing out some Cython code. To use the indenter
    functionality, the Cython.Compiler.Importer module will have to be used
    to load the code to support python 2.4
    """

    def __init__(self, buffer=None, indent_level=0, context=None, encoding='ascii'):
        self.buffer = buffer or StringIOTree()
        self.level = indent_level
        self.context = context
        self.encoding = encoding

    def indent(self, levels=1):
        # Returns True so it can be used in an 'if' (see indenter()).
        self.level += levels
        return True

    def dedent(self, levels=1):
        self.level -= levels

    def indenter(self, line):
        """
        Instead of

            with pyx_code.indenter("for i in range(10):"):
                pyx_code.putln("print i")

        write

            if pyx_code.indenter("for i in range(10);"):
                pyx_code.putln("print i")
                pyx_code.dedent()
        """
        self.putln(line)
        self.indent()
        return True

    def getvalue(self):
        # Decode to text if the underlying buffer produced bytes.
        value = self.buffer.getvalue()
        return value.decode(self.encoding) if isinstance(value, bytes) else value

    def putln(self, line, context=None):
        effective_context = context or self.context
        if effective_context:
            line = sub_tempita(line, effective_context)
        self._putln(line)

    def _putln(self, line):
        indent = self.level * "    "
        self.buffer.write("%s%s\n" % (indent, line))

    def put_chunk(self, chunk, context=None):
        effective_context = context or self.context
        if effective_context:
            chunk = sub_tempita(chunk, effective_context)
        for chunk_line in textwrap.dedent(chunk).splitlines():
            self._putln(chunk_line)

    def insertion_point(self):
        # A new writer sharing indentation/context, writing at this point.
        return PyxCodeWriter(self.buffer.insertion_point(), self.level,
                             self.context)

    def named_insertion_point(self, name):
        setattr(self, name, self.insertion_point())
class ClosureTempAllocator(object):
    """Allocates reusable temporary variables inside a closure class scope."""

    def __init__(self, klass):
        self.klass = klass
        self.temps_allocated = {}   # type -> every cname ever allocated
        self.temps_free = {}        # type -> cnames currently unused
        self.temps_count = 0

    def reset(self):
        # Mark every allocated temp as free again.
        for temp_type, cnames in self.temps_allocated.items():
            self.temps_free[temp_type] = list(cnames)

    def allocate_temp(self, type):
        # Reuse a free temp of this type when possible, else declare a new one.
        if type not in self.temps_allocated:
            self.temps_allocated[type] = []
            self.temps_free[type] = []
        else:
            free_list = self.temps_free[type]
            if free_list:
                return free_list.pop(0)
        cname = '%s%d' % (Naming.codewriter_temp_prefix, self.temps_count)
        self.klass.declare_var(pos=None, name=cname, cname=cname, type=type, is_cdef=True)
        self.temps_allocated[type].append(cname)
        self.temps_count += 1
        return cname
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/CodeGeneration.py 0000664 0000000 0000000 00000002124 13766135517 0031530 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import
from .Visitor import VisitorTransform
from .Nodes import StatListNode
class ExtractPxdCode(VisitorTransform):
    """
    Finds nodes in a pxd file that should generate code, and
    returns them in a StatListNode.

    The result is a tuple (StatListNode, ModuleScope), i.e.
    everything that is needed from the pxd after it is processed.

    A purer approach would be to separately compile the pxd code,
    but the result would have to be slightly more sophisticated
    than pure strings (functions + wanted interned strings +
    wanted utility code + wanted cached objects) so for now this
    approach is taken.
    """

    def __call__(self, root):
        self.funcs = []
        self.visitchildren(root)
        return (StatListNode(root.pos, stats=self.funcs), root.scope)

    def visit_FuncDefNode(self, node):
        # Collect the function node for code generation.
        self.funcs.append(node)
        # Do not visit children, nested funcdefnodes will
        # also be moved by this action...
        return node

    def visit_Node(self, node):
        # Default case: just recurse.
        self.visitchildren(node)
        return node
CypclassTransforms.py 0000664 0000000 0000000 00000037143 13766135517 0032434 0 ustar 00root root 0000000 0000000 cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler #
# Cypclass transforms
#
from __future__ import absolute_import
import cython
cython.declare(Naming=object, PyrexTypes=object, EncodedString=object, error=object)
from collections import defaultdict
from . import Naming
from . import Nodes
from . import PyrexTypes
from . import ExprNodes
from . import Visitor
from . import TreeFragment
from .StringEncoding import EncodedString
from .ParseTreeTransforms import NormalizeTree, InterpretCompilerDirectives, DecoratorTransform, AnalyseDeclarationsTransform
from .Errors import error
#
# Visitor for wrapper cclass injection
#
# - Insert additional cclass wrapper nodes by returning lists of nodes
# => must run after NormalizeTree (otherwise single statements might not be held in a list)
#
class CypclassWrapperInjection(Visitor.CythonTransform):
    """
    Synthesize and insert a wrapper c class at the module level for each cypclass that supports it.
    - Even nested cypclasses have their wrapper at the module level.
    - Must run after NormalizeTree.
    - The root node passed when calling this visitor should not be lower than a ModuleNode.
    """

    # property templates
    # NOTE(review): the indentation inside these TreeFragment template strings
    # appears to have been stripped in this copy of the file. TreeFragment
    # parses them as Pyrex code, where indentation is significant -- confirm
    # the template bodies against the upstream source before relying on them.
    property_template = TreeFragment.TreeFragment(u"""
property NAME:
def __get__(self):
OBJ = self
return OBJ.ATTR
def __set__(self, value):
OBJ = self
OBJ.ATTR = value
""", level='c_class', pipeline=[NormalizeTree(None)])

    # method wrapper templates
    method_template = TreeFragment.TreeFragment(u"""
def NAME(self, ARGDECLS):
OBJ = self
return OBJ.NAME(ARGS)
""", level='c_class', pipeline=[NormalizeTree(None)])
    method_no_return_template = TreeFragment.TreeFragment(u"""
def NAME(self, ARGDECLS):
OBJ = self
OBJ.NAME(ARGS)
""", level='c_class', pipeline=[NormalizeTree(None)])

    # static method wrapper templates
    static_template = TreeFragment.TreeFragment(u"""
@staticmethod
def NAME(ARGDECLS):
return TYPE_NAME.NAME(ARGS)
""", level='c_class', pipeline=[NormalizeTree(None)])
    static_no_return_template = TreeFragment.TreeFragment(u"""
@staticmethod
def NAME(ARGDECLS):
TYPE_NAME.NAME(ARGS)
""", level='c_class', pipeline=[NormalizeTree(None)])
def __call__(self, root):
    # Pipeline applied to the synthesized wrapper statements before they
    # are merged back into the real module body.
    self.pipeline = [
        InterpretCompilerDirectives(self.context, self.context.compiler_directives),
        DecoratorTransform(self.context),
        AnalyseDeclarationsTransform(self.context)
    ]
    return super(CypclassWrapperInjection, self).__call__(root)

def visit_ExprNode(self, node):
    # avoid visiting sub expressions
    return node
def visit_ModuleNode(self, node):
    # Reset the per-module collection state, collect all cypclasses, then
    # inject the synthesized wrappers into the module body.
    self.collected_cypclasses = []
    self.wrappers = []
    self.type_to_names = {}
    self.base_type_to_deferred = defaultdict(list)
    self.synthesized = set()
    self.nesting_stack = []
    self.module_scope = node.scope
    self.visitchildren(node)
    self.inject_cypclass_wrappers(node)
    return node

# TODO: can cypclasses be nested in something other than this ?
# can cypclasses even be nested in non-cypclass cpp classes, or structs ?
def visit_CStructOrUnionDefNode(self, node):
    # Track nesting so nested cypclasses get fully qualified names.
    self.nesting_stack.append(node)
    self.visitchildren(node)
    self.nesting_stack.pop()
    return node

def visit_CppClassNode(self, node):
    if node.cypclass:
        self.collect_cypclass(node)
    # visit children and keep track of nesting
    return self.visit_CStructOrUnionDefNode(node)
def collect_cypclass(self, node):
    """Register *node* for wrapper synthesis unless it is unsupported."""
    if node.templates:
        # Python wrapper for templated cypclasses not supported yet
        return
    if node.attributes is None:
        # skip forward declarations
        return
    if node.entry.defined_in_pxd:
        # Skip cypclasses defined in a pxd file
        return
    new_entry = node.scope.lookup_here("__new__")
    if new_entry and new_entry.type.return_type is not node.entry.type:
        # skip cypclasses that don't instantiate their own type
        return
    # indicate that the cypclass will have a wrapper
    node.entry.type.support_wrapper = True
    self.derive_names(node)
    self.collected_cypclasses.append(node)
def create_unique_name(self, name, entries=None):
    """Return *name*, suffixed as needed so it is absent from *entries*.

    # output: name(_u_*)?
    # guarantees:
    # - different inputs always result in different outputs
    # - the output is not among the given entries
    # if entries is None, the module scope entries are used
    """
    if entries is None:
        entries = self.module_scope.entries
    unique_name = name
    if unique_name in entries:
        # First collision: append '_u', then pad with '_' until free.
        unique_name = "%s_u" % unique_name
        while unique_name in entries:
            unique_name = "%s_" % unique_name
    return EncodedString(unique_name)
def derive_names(self, node):
    """Compute and cache the dotted qualified name and the module-level
    wrapper cclass name for the cypclass *node*."""
    names = [outer.name for outer in self.nesting_stack]
    names.append(node.name)
    qualified_name = EncodedString(".".join(names))
    flat_name = "_".join(names)
    cclass_name = self.create_unique_name("%s_cyp_cclass_wrapper" % flat_name)
    self.type_to_names[node.entry.type] = qualified_name, cclass_name
def inject_cypclass_wrappers(self, module_node):
    """Synthesize all collected wrappers and append them to the module body."""
    for collected in self.collected_cypclasses:
        self.synthesize_wrappers(collected)
    # only a shallow copy: retains the same scope etc
    fake_module_node = module_node.clone_node()
    fake_module_node.body = Nodes.StatListNode(
        module_node.body.pos,
        stats = self.wrappers
    )
    # Analyse the synthesized wrappers in isolation, then merge them back.
    for phase in self.pipeline:
        fake_module_node = phase(fake_module_node)
    module_node.body.stats.extend(fake_module_node.body.stats)

def synthesize_wrappers(self, node):
    # Defer synthesis until every wrapped base type has its wrapper, so
    # that base wrapper classes exist before their subclasses.
    node_type = node.entry.type
    for wrapped_base_type in node_type.iter_wrapped_base_types():
        if wrapped_base_type not in self.synthesized:
            self.base_type_to_deferred[wrapped_base_type].append(lambda: self.synthesize_wrappers(node))
            return
    qualified_name, cclass_name = self.type_to_names[node_type]
    cclass = self.synthesize_wrapper_cclass(node, cclass_name, qualified_name)
    # mark this cypclass as having synthesized wrappers
    self.synthesized.add(node_type)
    # forward declare the cclass wrapper
    cclass.declare(self.module_scope)
    self.wrappers.append(cclass)
    # synthesize deferred dependent subclasses
    for thunk in self.base_type_to_deferred[node_type]:
        thunk()
def synthesize_base_tuple(self, node):
    """Build the bases tuple for the wrapper cclass: one NameNode per
    wrapped base type's wrapper class."""
    node_type = node.entry.type
    base_name_nodes = [
        ExprNodes.NameNode(node.pos, name=base.wrapper_type.name)
        for base in node_type.iter_wrapped_base_types()
    ]
    return ExprNodes.TupleNode(node.pos, args=base_name_nodes)
def synthesize_wrapper_cclass(self, node, cclass_name, qualified_name):
    """Create the CypclassWrapperDefNode (a cclass) wrapping *node*."""
    cclass_bases = self.synthesize_base_tuple(node)
    stats = []
    # insert method wrappers in the statement list
    self.insert_cypclass_method_wrappers(node, cclass_name, stats)
    cclass_body = Nodes.StatListNode(pos=node.pos, stats=stats)
    cclass_doc = EncodedString("Python Object wrapper for underlying cypclass %s" % qualified_name)
    wrapper = Nodes.CypclassWrapperDefNode(
        node.pos,
        visibility = 'private',
        typedef_flag = 0,
        api = 0,
        module_name = "",
        class_name = cclass_name,
        as_name = cclass_name,
        bases = cclass_bases,
        objstruct_name = Naming.cypclass_wrapper_layout_type,
        typeobj_name = None,
        check_size = None,
        in_pxd = node.in_pxd,
        doc = cclass_doc,
        body = cclass_body,
        wrapped_cypclass = node,
        wrapped_nested_name = qualified_name
    )
    return wrapper
def insert_cypclass_method_wrappers(self, node, cclass_name, stats):
    """Append def-method wrappers and properties for the entries of the
    wrapped cypclass scope to *stats*."""
    for attr in node.scope.entries.values():
        if attr.is_cfunction:
            alternatives = attr.all_alternatives()
            # > consider the alternatives that are actually defined in this wrapped cypclass
            local_alternatives = [e for e in alternatives if e.mro_index == 0]
            if len(local_alternatives) == 0:
                # all alternatives are inherited, skip this method
                continue
            if len(alternatives) > 1:
                py_args_alternatives = [e for e in local_alternatives if all(arg.type.is_pyobject for arg in e.type.args)]
                if len(py_args_alternatives) == 1:
                    # if there is a single locally defined method with all-python arguments, use that one
                    attr = py_args_alternatives[0]
                else:
                    # else skip overloaded method for now
                    continue
            py_method_wrapper = self.synthesize_cypclass_method_wrapper(node, cclass_name, attr)
            if py_method_wrapper:
                stats.append(py_method_wrapper)
        elif attr.is_variable and attr.name != "this":
            # Plain data attributes become Python properties.
            property = self.synthesize_property(attr, node.entry)
            if property:
                stats.append(property)
def synthesize_property(self, attr_entry, node_entry):
    """Instantiate the property template for one attribute, or return
    None when the attribute type cannot round-trip through Python."""
    if not attr_entry.type.can_coerce_to_pyobject(self.module_scope):
        return None
    if not attr_entry.type.can_coerce_from_pyobject(self.module_scope):
        return None
    template = self.property_template
    underlying_name = EncodedString("o")
    property = template.substitute({
        "ATTR": attr_entry.name,
        "TYPE": node_entry.type,
        "OBJ": ExprNodes.NameNode(attr_entry.pos, name=underlying_name),
    }, pos=attr_entry.pos).stats[0]
    property.name = attr_entry.name
    property.doc = attr_entry.doc
    return property
def synthesize_cypclass_method_wrapper(self, node, cclass_name, method_entry):
    """Instantiate a def-method wrapper for one c method of the wrapped
    cypclass, or return None when the method cannot be wrapped."""
    if method_entry.type.is_static_method and method_entry.static_cname is None:
        # for now skip static methods, except when they are wrapped by a virtual method
        return
    # NOTE(review): the empty strings below look like angle-bracketed special
    # method names (e.g. "<init>", "<del>", "<alloc>") that were lost in this
    # copy of the file -- confirm against the upstream source.
    if method_entry.name in ("", "", "__new__", ""):
        # skip special methods that should not be wrapped
        return
    method_type = method_entry.type
    if method_type.self_qualifier:
        # skip methods with qualified self
        return
    if method_type.optional_arg_count:
        # for now skip methods with optional arguments
        return
    return_type = method_type.return_type
    # we pass the global scope as argument, should not affect the result (?)
    if not return_type.can_coerce_to_pyobject(self.module_scope):
        # skip c methods with Python-incompatible return types
        return
    for argtype in method_type.args:
        if not argtype.type.can_coerce_from_pyobject(self.module_scope):
            # skip c methods with Python-incompatible argument types
            return
    # > name of the wrapping method: same name as in the original code
    method_name = method_entry.original_name
    if method_name is None:
        # skip methods that don't have an original name
        return
    py_name = method_name
    # > all arguments of the wrapper method declaration
    py_args_decls = []
    for arg in method_type.args:
        # Untyped declarator: the wrapper takes plain Python objects.
        arg_base_type = Nodes.CSimpleBaseTypeNode(
            method_entry.pos,
            name = None,
            module_path = [],
            is_basic_c_type = 0,
            signed = 0,
            complex = 0,
            longness = 0,
            is_self_arg = 0,
            templates = None
        )
        arg_declarator = Nodes.CNameDeclaratorNode(
            method_entry.pos,
            name=arg.name,
            cname=None
        )
        arg_decl = Nodes.CArgDeclNode(
            method_entry.pos,
            base_type = arg_base_type,
            declarator = arg_declarator,
            not_none = 0,
            or_none = 0,
            default = None,
            annotation = None,
            kw_only = 0
        )
        py_args_decls.append(arg_decl)
    # > same docstring
    py_doc = method_entry.doc
    # > names of the arguments passed when calling the underlying method; self not included
    arg_objs = [ExprNodes.NameNode(arg.pos, name=arg.name) for arg in method_type.args]
    # > access the underlying attribute
    underlying_type = node.entry.type
    # > select the appropriate template and create the wrapper defnode
    need_return = not return_type.is_void
    if method_entry.type.is_static_method:
        template = self.static_template if need_return else self.static_no_return_template
        method_wrapper = template.substitute({
            "NAME": method_name,
            "ARGDECLS": py_args_decls,
            "TYPE_NAME": ExprNodes.NameNode(method_entry.pos, name=node.name),
            "ARGS": arg_objs
        }).stats[0]
    else:
        template = self.method_template if need_return else self.method_no_return_template
        # > derive a unique name that doesn't collide with the arguments
        underlying_name = self.create_unique_name("o", entries=[arg.name for arg in arg_objs])
        # > instanciate the wrapper from the template
        method_wrapper = template.substitute({
            "NAME": method_name,
            "ARGDECLS": py_args_decls,
            "TYPE": underlying_type,
            "OBJ": ExprNodes.NameNode(method_entry.pos, name=underlying_name),
            "ARGS": arg_objs
        }).stats[0]
    method_wrapper.doc = py_doc
    return method_wrapper
class CypclassLockTransform(Visitor.EnvTransform):
    """
    Acquire cypclass locks where required.
    """

    def __call__(self, root):
        # Tracks whether the current expression context may write to the
        # object being accessed (decides shared vs exclusive locking).
        self.writing = False
        return super(CypclassLockTransform, self).__call__(root)

    def visit_AttributeNode(self, node):
        obj = node.obj
        objtype = obj.type
        nodetype = node.type
        if objtype is None or nodetype is None:
            # Types not analysed (yet): nothing to lock.
            self.visitchildren(node)
            return node
        field_access = node.entry and not node.entry.is_type and not nodetype.is_cfunction
        method_call = nodetype.is_cfunction and not nodetype.is_static_method and node.is_called
        # The 'const' annotation is only sufficient to infer
        # that the receiver object will only be read, not that
        # __all__ the reachable subobjects will only be read.
        locally_writing = (field_access and node.is_target) or method_call
        if objtype.is_qualified_cyp_class and objtype.qualifier == 'locked':
            # 'locked' receiver: wrap the access in an explicit lock coercion,
            # exclusive when this access itself writes.
            old_writing = self.writing
            self.writing = locally_writing
            self.visitchildren(node)
            if field_access or method_call:
                node.obj = ExprNodes.CoerceToLockedNode(node.obj, exclusive=self.writing)
            self.writing = old_writing
        elif objtype.is_cyp_class:
            # Plain cypclass receiver: propagate the write flag downwards only.
            old_writing = self.writing
            self.writing |= locally_writing
            self.visitchildren(node)
            self.writing = old_writing
        return node
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/CythonScope.py 0000664 0000000 0000000 00000014431 13766135517 0031104 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import
from .Symtab import ModuleScope
from .PyrexTypes import *
from .UtilityCode import CythonUtilityCode
from .Errors import error
from .Scanning import StringSourceDescriptor
from . import MemoryView
class CythonScope(ModuleScope):
    # The virtual 'cython' module scope: holds the Cython built-in names,
    # the cython.view submodule and the test-scope entries.
    is_cython_builtin = 1
    _cythonscope_initialized = False

    def __init__(self, context):
        ModuleScope.__init__(self, u'cython', None, None)
        self.pxd_file_loaded = True
        self.populate_cython_scope()
        # The Main.Context object
        self.context = context

        # Pre-declare the fused numeric type shorthands.
        # NOTE(review): cname='' looks like a stripped '<error>' placeholder
        # (angle-bracketed tokens were lost in this copy) -- confirm upstream.
        for fused_type in (cy_integral_type, cy_floating_type, cy_numeric_type):
            entry = self.declare_typedef(fused_type.name,
                                         fused_type,
                                         None,
                                         cname='')
            entry.in_cinclude = True
def is_cpp(self):
    # Allow C++ utility code in C++ contexts.
    return self.context.cpp

def lookup_type(self, name):
    # This function should go away when types are all first-level objects.
    type = parse_basic_type(name)
    if type:
        return type

    return super(CythonScope, self).lookup_type(name)

def lookup(self, name):
    # Lazily load the full cython scope on the first failed lookup.
    entry = super(CythonScope, self).lookup(name)

    if entry is None and not self._cythonscope_initialized:
        self.load_cythonscope()
        entry = super(CythonScope, self).lookup(name)

    return entry
def find_module(self, module_name, pos):
    # There are no importable modules below 'cython' itself.
    error("cython.%s is not available" % module_name, pos)

def find_submodule(self, module_name):
    entry = self.entries.get(module_name, None)
    if not entry:
        # Submodules (e.g. 'view') only exist after the lazy load.
        self.load_cythonscope()
        entry = self.entries.get(module_name, None)

    if entry and entry.as_module:
        return entry.as_module
    else:
        # TODO: fix find_submodule control flow so that we're not
        # expected to create a submodule here (to protect CythonScope's
        # possible immutability). Hack ourselves out of the situation
        # for now.
        raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
                    "cython.%s is not available" % module_name)
def lookup_qualified_name(self, qname):
    """Resolve a dotted qualified name within this scope.

    ExprNode.as_cython_attribute generates qnames and we untangle it here.
    Returns the entry for the final component, or None if any intermediate
    component is missing or is not a module.
    """
    components = qname.split(u'.')
    scope = self
    while len(components) > 1:
        # Descend one submodule level per leading component.
        entry = scope.lookup_here(components[0])
        scope = entry.as_module if entry else None
        if scope is None:
            return None
        components.pop(0)
    return scope.lookup_here(components[0])
def populate_cython_scope(self):
    # These are used to optimize isinstance in FinalOptimizePhase
    type_object = self.declare_typedef(
        'PyTypeObject',
        base_type = c_void_type,
        pos = None,
        cname = 'PyTypeObject')
    type_object.is_void = True
    type_object_type = type_object.type

    # Declare PyObject_TypeCheck(o, t) so the optimizer can call it directly.
    self.declare_cfunction(
        'PyObject_TypeCheck',
        CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
                                CFuncTypeArg("t", c_ptr_type(type_object_type), None)]),
        pos = None,
        defining = 1,
        cname = 'PyObject_TypeCheck')
def load_cythonscope(self):
    """
    Creates some entries for testing purposes and entries for
    cython.array() and for cython.view.*.
    """
    if self._cythonscope_initialized:
        return
    self._cythonscope_initialized = True
    cython_testscope_utility_code.declare_in_scope(
        self, cython_scope=self)
    cython_test_extclass_utility_code.declare_in_scope(
        self, cython_scope=self)

    #
    # The view sub-scope
    #
    self.viewscope = viewscope = ModuleScope(u'view', self, None)
    self.declare_module('view', viewscope, None).as_module = viewscope
    viewscope.is_cython_builtin = True
    viewscope.pxd_file_loaded = True

    cythonview_testscope_utility_code.declare_in_scope(
        viewscope, cython_scope=self)

    view_utility_scope = MemoryView.view_utility_code.declare_in_scope(
        self.viewscope, cython_scope=self,
        whitelist=MemoryView.view_utility_whitelist)

    # Marks the types as being cython_builtin_type so that they can be
    # extended from without Cython attempting to import cython.view
    ext_types = [ entry.type
                  for entry in view_utility_scope.entries.values()
                  if entry.type.is_extension_type ]
    for ext_type in ext_types:
        ext_type.is_cython_builtin_type = 1

    # self.entries["array"] = view_utility_scope.entries.pop("array")
def create_cython_scope(context):
    # One could in fact probably make it a singleton,
    # but not sure yet whether any code mutates it (which would kill reusing
    # it across different contexts)
    return CythonScope(context)


# Load test utilities for the cython scope
def load_testscope_utility(cy_util_name, **kwargs):
    # Helper: load utility code from TestCythonScope.pyx by name.
    return CythonUtilityCode.load(cy_util_name, "TestCythonScope.pyx", **kwargs)
# Hand-written prototypes for the TestClass methods (they are undecorated
# in TestCythonScope.pyx, so no prototypes are generated for them).
undecorated_methods_protos = UtilityCode(proto=u"""
/* These methods are undecorated and have therefore no prototype */
static PyObject *__pyx_TestClass_cdef_method(
struct __pyx_TestClass_obj *self, int value);
static PyObject *__pyx_TestClass_cpdef_method(
struct __pyx_TestClass_obj *self, int value, int skip_dispatch);
static PyObject *__pyx_TestClass_def_method(
PyObject *self, PyObject *value);
""")

# Test-scope utility code instances declared into CythonScope lazily.
cython_testscope_utility_code = load_testscope_utility("TestScope")

test_cython_utility_dep = load_testscope_utility("TestDep")

cython_test_extclass_utility_code = \
    load_testscope_utility("TestClass", name="TestClass",
                           requires=[undecorated_methods_protos,
                                     test_cython_utility_dep])

cythonview_testscope_utility_code = load_testscope_utility("View.TestScope")
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/DebugFlags.py 0000664 0000000 0000000 00000001157 13766135517 0030652 0 ustar 00root root 0000000 0000000 # Can be enabled at the command line with --debug-xxx.
# Debug switches; each can be enabled on the command line with --debug-xxx.
debug_disposal_code = 0
debug_temp_alloc = 0
debug_coercion = 0

# Write comments into the C code that show where temporary variables
# are allocated and released.
debug_temp_code_comments = 0

# Write a call trace of the code generation phase into the C code.
debug_trace_code_generation = 0

# Do not replace exceptions with user-friendly error messages.
debug_no_exception_intercept = 0

# Print a message each time a new stage in the pipeline is entered.
debug_verbose_pipeline = 0

# Raise an exception when an error is encountered.
debug_exception_on_error = 0
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Errors.py 0000664 0000000 0000000 00000016752 13766135517 0030132 0 ustar 00root root 0000000 0000000 #
# Errors
#
from __future__ import absolute_import
try:
from __builtin__ import basestring as any_string_type
except ImportError:
any_string_type = (bytes, str)
import sys
from contextlib import contextmanager
from ..Utils import open_new_file
from . import DebugFlags
from . import Options
class PyrexError(Exception):
    # Base class for all Cython/Pyrex compilation errors.
    pass


class PyrexWarning(Exception):
    # Base class for compilation warnings.
    # NOTE(review): derives from Exception rather than Warning -- presumably
    # intentional, since these are collected/printed rather than raised
    # through the warnings machinery; confirm before changing.
    pass
def context(position):
    # Render a few source lines of context around *position* with a caret
    # marking the error column, framed by dashed rules.
    source = position[0]
    assert not (isinstance(source, any_string_type)), (
        "Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source)
    try:
        F = source.get_lines()
    except UnicodeDecodeError:
        # file has an encoding problem
        s = u"[unprintable code]\n"
    else:
        # Up to five preceding lines plus the error line itself.
        s = u''.join(F[max(0, position[1]-6):position[1]])
        s = u'...\n%s%s^\n' % (s, u' '*(position[2]-1))
    s = u'%s\n%s%s\n' % (u'-'*60, s, u'-'*60)
    return s
def format_position(position):
    """Format a (source, line, column) triple as 'file:line:col: ', or
    return an empty string for a missing position."""
    if not position:
        return u''
    description = position[0].get_error_description()
    return u"%s:%d:%d: " % (description, position[1], position[2])


def format_error(message, position):
    """Prefix *message* with position info and source context when a
    position is known; otherwise return the message unchanged."""
    if position:
        pos_str = format_position(position)
        cont = context(position)
        message = u'\nError compiling Cython file:\n%s\n%s%s' % (cont, pos_str, message or u'')
    return message
class CompileError(PyrexError):
    # A user-level error found while compiling Cython source.
    # 'reported' guards against the same error being printed twice.

    def __init__(self, position = None, message = u""):
        self.position = position
        self.message_only = message
        self.formatted_message = format_error(message, position)
        self.reported = False
        Exception.__init__(self, self.formatted_message)
        # Python Exception subclass pickling is broken,
        # see https://bugs.python.org/issue1692335
        self.args = (position, message)

    def __str__(self):
        return self.formatted_message
class CompileWarning(PyrexWarning):
    # A user-level warning; the message is prefixed with the source position.

    def __init__(self, position = None, message = ""):
        self.position = position
        Exception.__init__(self, format_position(position) + message)
class InternalError(Exception):
    # If this is ever raised, there is a bug in the compiler.

    def __init__(self, message):
        self.message_only = message
        super(InternalError, self).__init__(
            u"Internal compiler error: %s" % message)


class AbortError(Exception):
    # Throw this to stop the compilation immediately.

    def __init__(self, message):
        self.message_only = message
        super(AbortError, self).__init__(u"Abort error: %s" % message)
class CompilerCrash(CompileError):
    # raised when an unexpected exception occurs in a transform

    def __init__(self, pos, context, message, cause, stacktrace=None):
        if message:
            message = u'\n' + message
        else:
            message = u'\n'
        self.message_only = message
        if context:
            message = u"Compiler crash in %s%s" % (context, message)
        if stacktrace:
            import traceback
            message += (
                u'\n\nCompiler crash traceback from this point on:\n' +
                u''.join(traceback.format_tb(stacktrace)))
        if cause:
            if not stacktrace:
                message += u'\n'
            # Append the original exception's type and text.
            message += u'%s: %s' % (cause.__class__.__name__, cause)
        CompileError.__init__(self, pos, message)
        # Python Exception subclass pickling is broken,
        # see https://bugs.python.org/issue1692335
        self.args = (pos, context, message, cause, stacktrace)
class NoElementTreeInstalledException(PyrexError):
    """raised when the user enabled options.gdb_debug but no ElementTree
    implementation was found
    """


# Module-level listing state: the open listing file (if any), the running
# error count, and the stream that errors are echoed to.
listing_file = None
num_errors = 0
echo_file = None
def open_listing_file(path, echo_to_stderr = 1):
    # Begin a new error listing. If path is None, no file
    # is opened, the error counter is just reset.
    global listing_file, num_errors, echo_file
    listing_file = open_new_file(path) if path is not None else None
    echo_file = sys.stderr if echo_to_stderr else None
    num_errors = 0


def close_listing_file():
    # Close and forget the current listing file, if one is open.
    global listing_file
    if listing_file:
        listing_file.close()
        listing_file = None
def report_error(err, use_stack=True):
    # Either hold the error on the active error stack or print it to the
    # listing/echo streams and count it.
    if error_stack and use_stack:
        error_stack[-1].append(err)
    else:
        global num_errors
        # See Main.py for why dual reporting occurs. Quick fix for now.
        if err.reported: return
        err.reported = True
        try: line = u"%s\n" % err
        except UnicodeEncodeError:
            # Python <= 2.5 does this for non-ASCII Unicode exceptions
            line = format_error(getattr(err, 'message_only', "[unprintable exception message]"),
                                getattr(err, 'position', None)) + u'\n'
        if listing_file:
            try: listing_file.write(line)
            except UnicodeEncodeError:
                listing_file.write(line.encode('ASCII', 'replace'))
        if echo_file:
            try: echo_file.write(line)
            except UnicodeEncodeError:
                echo_file.write(line.encode('ASCII', 'replace'))
        num_errors += 1
        if Options.fast_fail:
            raise AbortError("fatal errors")


def error(position, message):
    """Report a CompileError at *position* and return it.

    A None position indicates a compiler bug and raises InternalError.
    """
    #print("Errors.error:", repr(position), repr(message)) ###
    if position is None:
        raise InternalError(message)
    err = CompileError(position, message)
    if DebugFlags.debug_exception_on_error: raise Exception(err)  # debug
    report_error(err)
    return err
# Minimum severity for notes/warnings to be emitted.
LEVEL = 1 # warn about all errors level 1 or higher
def _write_file_encode(file, line):
try:
file.write(line)
except UnicodeEncodeError:
file.write(line.encode('ascii', 'replace'))
def message(position, message, level=1):
    """Emit an informational note; returns the CompileWarning, or None
    when filtered out by *level*."""
    if level < LEVEL:
        return None
    warn = CompileWarning(position, message)
    line = u"note: %s\n" % warn
    for out in (listing_file, echo_file):
        if out:
            _write_file_encode(out, line)
    return warn
def warning(position, message, level=0):
    """Emit a warning; promoted to an error under Options.warning_errors."""
    if level < LEVEL:
        return None
    if Options.warning_errors and position:
        return error(position, message)
    warn = CompileWarning(position, message)
    line = u"warning: %s\n" % warn
    for out in (listing_file, echo_file):
        if out:
            _write_file_encode(out, line)
    return warn
# Messages already emitted by warn_once(), so each is shown only once.
_warn_once_seen = {}
def warn_once(position, message, level=0):
    """Like warning(), but each distinct message is emitted at most once."""
    if level < LEVEL or message in _warn_once_seen:
        return None
    warn = CompileWarning(position, message)
    line = u"warning: %s\n" % warn
    for out in (listing_file, echo_file):
        if out:
            _write_file_encode(out, line)
    _warn_once_seen[message] = True
    return warn
# These functions can be used to momentarily suppress errors.
# Stack of "held error" lists: while non-empty, report_error() appends to
# the top list instead of reporting immediately.
error_stack = []
def hold_errors():
    """Push a fresh collector so subsequent errors are held, not reported."""
    error_stack.append([])
def release_errors(ignore=False):
    """Pop the current held-error list and report its errors unless *ignore*."""
    held = error_stack.pop()
    if ignore:
        return
    for err in held:
        report_error(err)
def held_errors():
    """Return the list of errors currently being held (top of the stack)."""
    return error_stack[-1]
# same as context manager:
@contextmanager
def local_errors(ignore=False):
    """Context manager form of hold_errors()/release_errors(): yields the
    list that collects errors raised inside the 'with' block."""
    collected = []
    error_stack.append(collected)
    try:
        yield collected
    finally:
        release_errors(ignore=ignore)
# this module needs a redesign to support parallel cythonisation, but
# for now, the following works at least in sequential compiler runs
def reset():
    """Clear global error/warning state between sequential compiler runs."""
    _warn_once_seen.clear()
    del error_stack[:]
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/ExprNodes.py 0000664 0000000 0000000 00002214323 13766135517 0030561 0 ustar 00root root 0000000 0000000 #
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object, local_errors=object, report_error=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object, _py_int_types=object,
IS_PYTHON3=cython.bint)
import re
import sys
import copy
import os.path
import operator
from .Errors import (
error, warning, InternalError, CompileError, report_error, local_errors)
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node, utility_code_for_imports
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type,
is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran,
pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type,
pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor)
from .PyrexTypes import PythranExpr
# Python 2/3 compatibility shims for string and integer types.
try:
    from __builtin__ import basestring
except ImportError:
    # Python 3
    basestring = str
    any_string_type = (bytes, str)
else:
    # Python 2
    any_string_type = (bytes, unicode)

if sys.version_info[0] >= 3:
    IS_PYTHON3 = True
    _py_int_types = int
else:
    IS_PYTHON3 = False
    _py_int_types = (int, long)
class NotConstant(object):
    """Singleton sentinel marking a value that is not a compile-time constant.

    A single shared instance is used so code can compare with
    ``is not_a_constant``.
    """
    _obj = None

    def __new__(cls):
        # Always hand back the one shared instance.
        if NotConstant._obj is None:
            NotConstant._obj = super(NotConstant, cls).__new__(cls)
        return NotConstant._obj

    def __repr__(self):
        # Bug fix: this previously returned the empty string, which made the
        # sentinel invisible in debug output; match upstream Cython's repr.
        return "<NOT CONSTANT>"
# Shared sentinels: 'not_a_constant' marks a value known to be non-constant;
# 'constant_value_not_set' marks one whose constant value was never computed.
not_a_constant = NotConstant()
constant_value_not_set = object()
# error messages when coercing from key[0] to key[1]
# Looked up by find_coercion_error(); a missing pair falls back to the
# caller-supplied default message.
coercion_error_dict = {
    # string related errors
    (unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
                               " This is not portable and requires explicit encoding."),
    (unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
    (unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
    (unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
    (unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
    (unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
    (bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
    (bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
    (bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
                                    " This is not portable to Py3."),
    (bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
    (bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
        "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
    (basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
    (str_type, unicode_type): ("str objects do not support coercion to unicode,"
                               " use a unicode string literal instead (u'')"),
    (str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
    (str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
    (str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
    (str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
    (str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
    (str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
    (str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
        "'str' objects do not support coercion to C types (use 'unicode'?)."),
    (PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
    (PyrexTypes.c_const_char_ptr_type, unicode_type): (
        "Cannot convert 'char*' to unicode implicitly, decoding required"),
    (PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
    (PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
        "Cannot convert 'char*' to unicode implicitly, decoding required"),
}
def find_coercion_error(type_tuple, default, env):
    """Return the specific coercion-error message for (src, dst) types, or
    *default* when none applies under the current directives."""
    err = coercion_error_dict.get(type_tuple)
    if err is None:
        return default
    char_ptr_types = (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
                      PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type)
    encoding = env.directives['c_string_encoding']
    if encoding and any(t in type_tuple for t in char_ptr_types):
        # char* coercions become legal when a c_string_encoding is active ...
        if type_tuple[1].is_pyobject:
            return default
        if encoding in ('ascii', 'default'):
            return default
        # ... except to C types under a non-trivial encoding.
        return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
    return err
def default_str_type(env):
    """Map the 'c_string_type' directive to the corresponding Python type."""
    mapping = {
        'bytes': bytes_type,
        'bytearray': bytearray_type,
        'str': str_type,
        'unicode': unicode_type,
    }
    return mapping.get(env.directives['c_string_type'])
def check_negative_indices(*nodes):
    """
    Raise a warning on nodes that are known to have negative numeric values.
    Used to find (potential) bugs inside of "wraparound=False" sections.
    """
    for node in nodes:
        if node is None:
            continue
        value = node.constant_result
        if not isinstance(value, _py_int_types) and not isinstance(value, float):
            continue
        if value < 0:
            warning(node.pos,
                    "the result of using negative indices inside of "
                    "code sections marked as 'wraparound=False' is "
                    "undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
    # Infer the type of an item of *seq_node*, optionally at a constant
    # index.  For tuple-typed non-constructor nodes, follow a single
    # assignment back to its RHS (safe because tuples are immutable).
    if not seq_node.is_sequence_constructor:
        if seq_type is None:
            seq_type = seq_node.infer_type(env)
        if seq_type is tuple_type:
            # tuples are immutable => we can safely follow assignments
            if seq_node.cf_state and len(seq_node.cf_state) == 1:
                try:
                    seq_node = seq_node.cf_state[0].rhs
                except AttributeError:
                    pass
    if seq_node is not None and seq_node.is_sequence_constructor:
        if index_node is not None and index_node.has_constant_result():
            try:
                item = seq_node.args[index_node.constant_result]
            except (ValueError, TypeError, IndexError):
                # non-integer or out-of-range index => fall through
                pass
            else:
                return item.infer_type(env)
        # if we're lucky, all items have the same type
        item_types = set([item.infer_type(env) for item in seq_node.args])
        if len(item_types) == 1:
            return item_types.pop()
    # unknown item type
    return None
def make_dedup_key(outer_type, item_nodes):
    """
    Recursively generate a deduplication key from a sequence of values.
    Includes Cython node types to work around the fact that (1, 2.0) == (1.0, 2), for example.

    @param outer_type: The type of the outer container.
    @param item_nodes: A sequence of constant nodes that will be traversed recursively.
    @return: A tuple that can be used as a dict key for deduplication.
    """
    item_keys = [
        (py_object_type, None, type(None)) if node is None
        # For sequences and their "mult_factor", see TupleNode.
        else make_dedup_key(node.type, [node.mult_factor if node.is_literal else None] + node.args) if node.is_sequence_constructor
        else make_dedup_key(node.type, (node.start, node.stop, node.step)) if node.is_slice
        # For constants, look at the Python value type if we don't know the concrete Cython type.
        else (node.type, node.constant_result,
              type(node.constant_result) if node.type is py_object_type else None) if node.has_constant_result()
        else None  # something we cannot handle => short-circuit below
        for node in item_nodes
    ]
    # A single unhandled item makes the whole sequence non-deduplicatable.
    if None in item_keys:
        return None
    return outer_type, tuple(item_keys)
# Returns a block of code to translate the exception,
# plus a boolean indicating whether to check for Python exceptions.
def get_exception_handler(exception_value):
    if exception_value is None:
        # default handler, no extra Python error check needed
        return "__Pyx_CppExn2PyErr();", False
    if (exception_value.type == PyrexTypes.c_char_type
            and exception_value.value == '*'):
        # 'except *' => default handler plus a Python error check
        return "__Pyx_CppExn2PyErr();", True
    if exception_value.type.is_pyobject:
        # raise the given Python exception type with the C++ message
        handler = (
            'try { throw; } catch(const std::exception& exn) {'
            'PyErr_SetString(%s, exn.what());'
            '} catch(...) { PyErr_SetNone(%s); }' % (
                exception_value.entry.cname,
                exception_value.entry.cname))
        return handler, False
    # call a user-provided conversion function
    handler = (
        '%s(); if (!PyErr_Occurred())'
        'PyErr_SetString(PyExc_RuntimeError, '
        '"Error converting c++ exception.");' % (
            exception_value.entry.cname))
    return handler, False
def maybe_check_py_error(code, check_py_exception, pos, nogil):
    """Emit a Python error check (GIL-aware) when *check_py_exception* is set."""
    if not check_py_exception:
        return
    if nogil:
        code.putln(code.error_goto_if("__Pyx_ErrOccurredWithGIL()", pos))
    else:
        code.putln(code.error_goto_if("PyErr_Occurred()", pos))
def translate_cpp_exception(code, pos, inside, py_result, exception_value, nogil):
    # Wrap the C++ statement *inside* in a try/catch that converts any C++
    # exception into a Python one (as selected by *exception_value*) and
    # jumps to the error label.
    raise_py_exception, check_py_exception = get_exception_handler(exception_value)
    code.putln("try {")
    code.putln("%s" % inside)
    if py_result:
        code.putln(code.error_goto_if_null(py_result, pos))
    maybe_check_py_error(code, check_py_exception, pos, nogil)
    code.putln("} catch(...) {")
    if nogil:
        # setting the Python error requires the GIL
        code.put_ensure_gil(declare_gilstate=True)
    code.putln(raise_py_exception)
    if nogil:
        code.put_release_ensured_gil()
    code.putln(code.error_goto(pos))
    code.putln("}")
# Used to handle the case where an lvalue expression and an overloaded assignment
# both have an exception declaration.
def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code, lhs_exc_val, assign_exc_val, nogil):
    # Emit two nested try/catch blocks: the outer one guards evaluation of
    # the lvalue, the inner one guards the overloaded assignment itself.
    handle_lhs_exc, lhc_check_py_exc = get_exception_handler(lhs_exc_val)
    handle_assignment_exc, assignment_check_py_exc = get_exception_handler(assign_exc_val)
    code.putln("try {")
    code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code))
    maybe_check_py_error(code, lhc_check_py_exc, pos, nogil)
    code.putln("try {")
    code.putln("__pyx_local_lvalue = %s;" % rhs_code)
    maybe_check_py_error(code, assignment_check_py_exc, pos, nogil)
    # Catch any exception from the overloaded assignment.
    code.putln("} catch(...) {")
    if nogil:
        code.put_ensure_gil(declare_gilstate=True)
    code.putln(handle_assignment_exc)
    if nogil:
        code.put_release_ensured_gil()
    code.putln(code.error_goto(pos))
    code.putln("}")
    # Catch any exception from evaluating lhs.
    code.putln("} catch(...) {")
    if nogil:
        code.put_ensure_gil(declare_gilstate=True)
    code.putln(handle_lhs_exc)
    if nogil:
        code.put_release_ensured_gil()
    code.putln(code.error_goto(pos))
    code.putln('}')
class ExprNode(Node):
    """Abstract base class for all expression nodes in the parse tree."""
    #  subexprs     [string]   Class var holding names of subexpr node attrs
    #  type         PyrexType  Type of the result
    #  result_code  string     Code fragment
    #  result_ctype string     C type of result_code if different from type
    #  is_temp      boolean    Result is in a temporary variable
    #  is_sequence_constructor
    #               boolean    Is a list or tuple constructor expression
    #  is_starred   boolean    Is a starred expression (e.g. '*a')
    #  saved_subexpr_nodes
    #               [ExprNode or [ExprNode or None] or None]
    #               Cached result of subexpr_nodes()
    #  postpone_subexpr_disposal
    #               boolean
    #               Postpone disposing of the subexpressions of a
    #               temporary node until it is itself freed.
    #  use_managed_ref boolean  use ref-counted temps/assignments/etc.
    #  result_is_used  boolean  indicates that the result will be dropped and the
    #  is_numpy_attribute   boolean   Is a Numpy module attribute
    #               result_code/temp_result can safely be set to None
    #  annotation   ExprNode or None    PEP526 annotation for names or expressions

    # Defaults for the per-node state described above.
    result_ctype = None
    type = None
    annotation = None
    temp_code = None
    old_temp = None # error checker for multiple frees etc.
    use_managed_ref = True # can be set by optimisation transforms
    result_is_used = True
    is_numpy_attribute = False
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
# Default node-kind and state flags; subclasses override as appropriate.
is_sequence_constructor = False
is_dict_literal = False
is_set_literal = False
is_string_literal = False
is_attribute = False
is_subscript = False
is_slice = False
is_buffer_access = False
is_memview_index = False
is_memview_slice = False
is_memview_broadcast = False
is_memview_copy_assignment = False
saved_subexpr_nodes = None
postpone_subexpr_disposal = False
is_temp = False
has_temp_moved = False # if True then attempting to do anything but free the temp is invalid
is_target = False
is_starred = False
constant_result = constant_value_not_set
# The generic tree-walking code uses 'child_attrs'; for expressions the
# children are exactly the sub-expressions.
child_attrs = property(fget=operator.attrgetter('subexprs'))
def not_implemented(self, method_name):
    """Raise InternalError for an abstract method a subclass failed to implement."""
    print_call_chain(method_name, "not implemented")
    cls_name = self.__class__.__name__
    raise InternalError("%s.%s not implemented" % (cls_name, method_name))
def is_lvalue(self):
    """Whether this node may be used as an assignment target (default: no)."""
    return 0
def is_addressable(self):
    """An addressable expression is an lvalue that is not a memoryview slice."""
    return self.is_lvalue() and not self.type.is_memoryviewslice
def is_ephemeral(self):
    """An ephemeral result lives in a Python temporary with (we suspect)
    no other references, so operations that extract pointers from it are
    disallowed — they would likely leave a dangling pointer."""
    return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
    """Collect the child expression nodes named by the 'subexprs' class
    attribute; attributes may hold a node, a list of nodes, or None."""
    nodes = []
    for attr in self.subexprs:
        value = getattr(self, attr)
        if value is None:
            continue
        if type(value) is list:
            nodes.extend(value)
        else:
            nodes.append(value)
    return nodes
def result(self):
    # Return the C code fragment for this node's result: the temp name if
    # the result is held in a temporary, otherwise a computed fragment.
    if self.is_temp:
        #if not self.temp_code:
        # pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
        # raise RuntimeError("temp result name not set in %s at %r" % (
        # self.__class__.__name__, pos))
        return self.temp_code
    else:
        return self.calculate_result_code()
def _make_move_result_rhs(self, result, allow_move=True):
    """Wrap *result* in std::move() when it is safe to move out of this temp."""
    movable = (allow_move and self.is_temp and self.type.is_cpp_class
               and not self.type.is_cyp_class and not self.type.is_reference)
    if not movable:
        return result
    self.has_temp_moved = True
    return "__PYX_STD_MOVE_IF_SUPPORTED({0})".format(result)
def move_result_rhs(self):
    """Result code for RHS use, moved from when safe."""
    return self._make_move_result_rhs(self.result())
def move_result_rhs_as(self, type):
    """Result cast to *type*; moving is only allowed for non-reference,
    non-refcounted target types."""
    can_move = type and not type.is_reference and not type.needs_refcounting
    return self._make_move_result_rhs(self.result_as(type), allow_move=can_move)
def pythran_result(self, type_=None):
    """Return the Pythran expression for this node, using *type_* as a
    fallback when the node itself is not Pythran-supported."""
    if is_pythran_supported_node_or_none(self):
        return to_pythran(self)
    assert type_ is not None
    return to_pythran(self, type_)
def is_c_result_required(self):
    """
    Subtypes may return False here if result temp allocation can be skipped
    (the C result value is never read).
    """
    return True
def result_as(self, type = None):
    """Return the result code cast to the given C type."""
    if self.is_temp and self.type.is_pyobject and type != py_object_type:
        # Allocated temporaries are always generic PyObject *, which may
        # not reflect the declared type (e.g. an extension type).
        return typecast(type, py_object_type, self.result())
    return typecast(type, self.ctype(), self.result())
def py_result(self):
    """Result code cast to PyObject *."""
    return self.result_as(py_object_type)
def ctype(self):
    """The native C type of the generated result expression (which may
    differ from the node's declared type)."""
    return self.result_ctype or self.type
def get_constant_c_result_code(self):
    # Return the constant value of this node as a result code
    # string, or None if the node is not constant. This method
    # can be called when the constant result code is required
    # before the code generation phase.
    #
    # The return value is a string that can represent a simple C
    # value, a constant C name or a constant C expression. If the
    # node type depends on Python code, this must return None.
    return None
def calculate_constant_result(self):
    # Calculate the constant compile time result value of this
    # expression and store it in ``self.constant_result``. Does
    # nothing by default, thus leaving ``self.constant_result``
    # unknown. If valid, the result can be an arbitrary Python
    # value.
    #
    # This must only be called when it is assured that all
    # sub-expressions have a valid constant_result value. The
    # ConstantFolding transform will do this.
    pass
def has_constant_result(self):
    """True when a valid compile-time constant value has been computed."""
    if self.constant_result is constant_value_not_set:
        return False
    return self.constant_result is not not_a_constant
def compile_time_value(self, denv):
    """Evaluate at compile time; the default reports an error."""
    error(self.pos, "Invalid compile-time expression")
def compile_time_value_error(self, e):
    """Report an exception raised while evaluating a compile-time expression."""
    error(self.pos, "Error in compile-time expression: %s: %s" % (
        e.__class__.__name__, e))
# ------------- Declaration Analysis ----------------
def analyse_target_declaration(self, env):
    """Default: this node cannot be an assignment or deletion target."""
    error(self.pos, "Cannot assign to or delete this")
# ------------- Expression Analysis ----------------
def analyse_const_expression(self, env):
    """Analyse a constant expression during the declarations phase:
    determine its type and verify it is a legal const expression."""
    analysed = self.analyse_types(env)
    analysed.check_const()
    return analysed
def analyse_expressions(self, env):
    """Convenience routine: run the full expression-analysis phase
    (type analysis plus temp allocation) for a whole expression."""
    return self.analyse_types(env)
def analyse_target_expression(self, env, rhs):
    """Analyse this node as the LHS of an assignment (*rhs* unused here)."""
    return self.analyse_target_types(env)
def analyse_boolean_expression(self, env):
    """Analyse the expression and coerce its result to a boolean."""
    return self.analyse_types(env).coerce_to_boolean(env)
def analyse_temp_boolean_expression(self, env):
    """Analyse as a boolean forced into a simple temporary.

    Used when a branch is taken on the result and there is no later
    opportunity to run disposal code: forcing the result into a temp
    guarantees all disposal has happened by the time it is read."""
    analysed = self.analyse_types(env)
    return analysed.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
def type_dependencies(self, env):
    """Entries whose types must be resolved before self's type can be inferred."""
    if getattr(self, 'type', None) is not None:
        return ()
    deps = ()
    for subexpr in self.subexpr_nodes():
        deps += subexpr.type_dependencies(env)
    return deps
def infer_type(self, env):
    """Deduce this node's type without a full analyse_types pass.

    May assume everything in self.type_dependencies() is resolved."""
    known_type = getattr(self, 'type', None)
    if known_type is not None:
        return known_type
    entry = getattr(self, 'entry', None)
    if entry is not None:
        return entry.type
    self.not_implemented("infer_type")
def nonlocally_immutable(self):
    """Whether this is a safe reference that cannot be modified through
    globals or closures."""
    return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
def inferable_item_node(self, index=0):
    """
    Return a node that represents the (type) result of an indexing operation,
    e.g. for tuple unpacking or iteration.
    """
    index_node = IntNode(self.pos, value=str(index), constant_result=index,
                         type=PyrexTypes.c_py_ssize_t_type)
    return IndexNode(self.pos, base=self, index=index_node)
# --------------- Type Analysis ------------------
def analyse_as_module(self, env):
    """Scope of the cimported module this node refers to, or None."""
    return None
def analyse_as_type(self, env):
    """The type this node refers to, or None if it is not a type reference."""
    return None
def analyse_as_specialized_type(self, env):
    # Resolve a (possibly fused) type reference to a specific type,
    # reporting an error when a fused type cannot be specialized.
    type = self.analyse_as_type(env)
    if type and type.is_fused and env.fused_to_specific:
        # while it would be nice to test "if entry.type in env.fused_to_specific"
        # rather than try/catch this doesn't work reliably (mainly for nested fused types)
        try:
            return type.specialize(env.fused_to_specific)
        except KeyError:
            pass
    if type and type.is_fused:
        error(self.pos, "Type is not specific")
    return type
def analyse_as_extension_type(self, env):
    """The extension/builtin type this node refers to, or None."""
    return None
def analyse_types(self, env):
    """Abstract: subclasses must implement type analysis."""
    self.not_implemented("analyse_types")
def analyse_target_types(self, env):
    """Analyse this node as an assignment target; defaults to analyse_types."""
    return self.analyse_types(env)
def nogil_check(self, env):
    """By default any Python-object expression is rejected in nogil
    sections; subclasses that can run without the GIL override this."""
    if self.type and self.type.is_pyobject:
        self.gil_error()
def gil_assignment_check(self, env):
    """Reject assignment of Python objects inside a nogil context."""
    if env.nogil and self.type.is_pyobject:
        error(self.pos, "Assignment of Python object not allowed without gil")
def check_const(self):
    """Default: this expression is not a legal constant expression."""
    self.not_const()
    return False
def not_const(self):
    """Report that this node is not allowed in a constant expression."""
    error(self.pos, "Not allowed in a constant expression")
def check_const_addr(self):
    """Default: this expression is not a constant-address C lvalue."""
    self.addr_not_const()
    return False
def addr_not_const(self):
    """Report that this node's address is not constant."""
    error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
def result_in_temp(self):
    """True if the result lives in a temp owned by this node or one of its
    subexpressions; overridden by nodes that share a subnode's result."""
    return self.is_temp
def result_is_new_reference(self):
    """True if the result is an already-increfed new reference that must
    be decrefed later."""
    return self.result_in_temp()
def target_code(self):
    """C lvalue fragment for use as the LHS of a C assignment."""
    return self.calculate_result_code()
def calculate_result_code(self):
    """Abstract: produce a C code fragment for a non-temp result."""
    self.not_implemented("calculate_result_code")
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
def allocate_temp_result(self, code):
    # Allocate a temporary for this node's result (double allocation is a
    # compiler bug).  Python objects always use a generic PyObject* temp;
    # unused C results may skip allocation entirely; void results get none.
    if self.temp_code:
        raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
    type = self.type
    if not type.is_void:
        if type.is_pyobject:
            # temps for Python objects are always plain PyObject *
            type = PyrexTypes.py_object_type
        elif not (type.is_cyp_class or self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
            # dropped C result => no temp needed
            self.temp_code = None
            return
        self.temp_code = code.funcstate.allocate_temp(
            type, manage_ref=self.use_managed_ref)
    else:
        self.temp_code = None
def release_temp_result(self, code):
    # Release this node's result temp, guarding against double releases
    # and releases of never-allocated temps (both compiler bugs).
    if not self.temp_code:
        if not self.result_is_used:
            # not used anyway, so ignore if not set up
            return
        pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
        if self.old_temp:
            raise RuntimeError("temp %s released multiple times in %s at %r" % (
                self.old_temp, self.__class__.__name__, pos))
        else:
            raise RuntimeError("no temp, but release requested in %s at %r" % (
                self.__class__.__name__, pos))
    code.funcstate.release_temp(self.temp_code)
    # remember the released temp so a double release can be diagnosed
    self.old_temp = self.temp_code
    self.temp_code = None
# ---------------- Code Generation -----------------
def make_owned_reference(self, code):
    """
    Make sure we own a reference to result.
    If the result is in a temp, it is already a new reference.
    """
    if not self.result_is_new_reference():
        # FIXME: is this verification really necessary ?
        if self.type.is_cyp_class and "NULL" in self.result():
            # never incref a potentially-NULL cypclass result
            pass
        else:
            code.put_incref(self.result(), self.ctype())
def make_owned_memoryviewslice(self, code):
    """
    Make sure we own the reference to this memoryview slice.
    """
    # TODO ideally this would be shared with "make_owned_reference"
    if not self.result_is_new_reference():
        code.put_incref_memoryviewslice(self.result(), self.type,
                                        have_gil=not self.in_nogil_context)
def generate_evaluation_code(self, code):
    # Generate code to evaluate this node and
    # its sub-expressions, and dispose of any
    # temporary results of its sub-expressions.
    self.generate_subexpr_evaluation_code(code)
    code.mark_pos(self.pos)
    if self.is_temp:
        self.allocate_temp_result(code)
    self.generate_result_code(code)
    if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr or self.postpone_subexpr_disposal):
        # If we are temp we do not need to wait until this node is disposed
        # before disposing children.  (String/pyunicode pointers may alias
        # subexpression buffers, so their disposal is postponed.)
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
def generate_subexpr_evaluation_code(self, code):
    """Generate evaluation code for every child expression."""
    for subexpr in self.subexpr_nodes():
        subexpr.generate_evaluation_code(code)
def generate_result_code(self, code):
    """Abstract: emit C statements computing this node's result."""
    self.not_implemented("generate_result_code")
def generate_disposal_code(self, code):
    # Dispose of this node's temporary result (decref-and-clear for
    # refcounted types) or, for non-temp nodes, of the subexpressions.
    if self.is_temp:
        if self.type.is_string or self.type.is_pyunicode_ptr or self.postpone_subexpr_disposal:
            # postponed from self.generate_evaluation_code()
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)
        if self.result():
            if self.type.is_cyp_class:
                # cypclass results may be NULL => use the X (NULL-safe) form
                code.put_xdecref_clear(self.result(), self.ctype(),
                                       have_gil=not self.in_nogil_context)
            else:
                code.put_decref_clear(self.result(), self.ctype(),
                                      have_gil=not self.in_nogil_context)
        if self.has_temp_moved:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
    else:
        # Already done if self.is_temp
        self.generate_subexpr_disposal_code(code)
def generate_subexpr_disposal_code(self, code):
    """Dispose of the temporary results of all sub-expressions."""
    for subexpr in self.subexpr_nodes():
        subexpr.generate_disposal_code(code)
def generate_post_assignment_code(self, code):
    """Clean up after this node's result was consumed by an assignment:
    the temp is cleared without a decref (the reference was taken over
    by the assignment target).
    """
    if self.is_temp:
        if self.type.is_string or self.type.is_pyunicode_ptr or self.postpone_subexpr_disposal:
            # postponed from self.generate_evaluation_code()
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)
        elif self.type.is_pyobject:
            # Forget the reference without decref'ing it.
            code.putln("%s = 0;" % self.result())
        elif self.type.is_cyp_class:
            code.putln("%s = 0;" % self.result())
        elif self.type.is_memoryviewslice:
            # A memoryview slice is cleared field by field.
            code.putln("%s.memview = NULL;" % self.result())
            code.putln("%s.data = NULL;" % self.result())
        if self.has_temp_moved:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
    else:
        self.generate_subexpr_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                             exception_check=None, exception_value=None):
    # Stub method for nodes which are not legal as
    # the LHS of an assignment. An error will have
    # been reported earlier.
    pass
def generate_deletion_code(self, code, ignore_nonexisting=False):
    # Stub method for nodes that are not legal as
    # the argument of a del statement. An error
    # will have been reported earlier.
    pass
def free_temps(self, code):
    """Release this node's temp result slot, or recurse into children.

    A temp node owns its own result slot (void temps have none to
    release); a non-temp node only frees its sub-expressions' temps.
    """
    if not self.is_temp:
        self.free_subexpr_temps(code)
        return
    if not self.type.is_void:
        self.release_temp_result(code)
def free_subexpr_temps(self, code):
    """Free the temp results held by every sub-expression."""
    for child in self.subexpr_nodes():
        child.free_temps(code)
def generate_function_definitions(self, env, code):
    # Default: plain expression nodes contain no function definitions.
    pass

# ----Generation of small bits of reference counting --
def generate_decref_set(self, code, rhs):
    # Emit "decref current value, then assign rhs" for this result.
    code.put_decref_set(self.result(), self.ctype(), rhs)
def generate_xdecref_set(self, code, rhs):
    # Same as generate_decref_set, using the xdecref variant.
    code.put_xdecref_set(self.result(), self.ctype(), rhs)
def generate_gotref(self, code, handle_null=False,
                    maybe_null_extra_check=True):
    """Emit a GOTREF for this node's result.

    With handle_null set: nothing is emitted when control-flow analysis
    proved the value NULL, and the x variant is used when it may be
    NULL (unless the extra check is disabled).
    """
    if handle_null and self.cf_is_null:
        return
    use_x_variant = (handle_null and self.cf_maybe_null
                     and maybe_null_extra_check)
    if use_x_variant:
        self.generate_xgotref(code)
    else:
        code.put_gotref(self.result(), self.ctype())
def generate_xgotref(self, code):
    # Emit the x variant of GOTREF for this node's result.
    code.put_xgotref(self.result(), self.ctype())
def generate_giveref(self, code):
    # Emit a GIVEREF for this node's result.
    code.put_giveref(self.result(), self.ctype())
def generate_xgiveref(self, code):
    # Emit the x variant of GIVEREF for this node's result.
    code.put_xgiveref(self.result(), self.ctype())
# ---------------- Annotation ---------------------

def annotate(self, code):
    """Annotate recursively: delegate to each sub-expression."""
    for child in self.subexpr_nodes():
        child.annotate(code)
# ----------------- Coercion ----------------------

def coerce_to(self, dst_type, env):
    # Coerce the result so that it can be assigned to
    # something of type dst_type. If processing is necessary,
    # wraps this node in a coercion node and returns that.
    # Otherwise, returns this node unchanged.
    #
    # This method is called during the analyse_expressions
    # phase of the src_node's processing.
    #
    # Note that subclasses that override this (especially
    # ConstNodes) must not (re-)set their own .type attribute
    # here. Since expression nodes may turn up in different
    # places in the tree (e.g. inside of CloneNodes in cascaded
    # assignments), this method must return a new node instance
    # if it changes the type.
    #
    src = self
    src_type = self.type

    # Known-bad coercions are reported once, here, and short-circuit.
    if self.check_for_coercion_error(dst_type, env):
        return self

    used_as_reference = dst_type.is_reference
    if used_as_reference and not src_type.is_reference:
        # Coerce to the referenced type; reference binding is handled below.
        dst_type = dst_type.ref_base_type

    if src_type.is_cv_qualified:
        # Compare/convert modulo const/volatile qualifiers.
        src_type = src_type.cv_base_type

    if src_type.is_fused or dst_type.is_fused:
        # See if we are coercing a fused function to a pointer to a
        # specialized function
        if (src_type.is_cfunction and not dst_type.is_fused and
                dst_type.is_ptr and dst_type.base_type.is_cfunction):
            dst_type = dst_type.base_type
            for signature in src_type.get_all_specialized_function_types():
                if signature.same_as(dst_type):
                    src.type = signature
                    src.entry = src.type.entry
                    src.entry.used = True
                    return self

        if src_type.is_fused:
            error(self.pos, "Type is not specialized")
        elif src_type.is_null_ptr and dst_type.is_ptr:
            # NULL can be implicitly cast to any pointer type
            return self
        else:
            error(self.pos, "Cannot coerce to a type that is not specialized")

        self.type = error_type
        return self

    if self.coercion_type is not None:
        # This is purely for error checking purposes!
        node = NameNode(self.pos, name='', type=self.coercion_type)
        node.coerce_to(dst_type, env)

    if dst_type.is_memoryviewslice:
        from . import MemoryView
        if not src.type.is_memoryviewslice:
            if src.type.is_pyobject:
                src = CoerceToMemViewSliceNode(src, dst_type, env)
            elif src.type.is_array:
                src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
            elif not src_type.is_error:
                error(self.pos,
                      "Cannot convert '%s' to memoryviewslice" % (src_type,))
        else:
            if src.type.writable_needed:
                dst_type.writable_needed = True
            if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
                                        copying=self.is_memview_copy_assignment):
                if src.type.dtype.same_as(dst_type.dtype):
                    msg = "Memoryview '%s' not conformable to memoryview '%s'."
                    tup = src.type, dst_type
                else:
                    msg = "Different base types for memoryviews (%s, %s)"
                    tup = src.type.dtype, dst_type.dtype
                error(self.pos, msg % tup)
    elif dst_type.is_pyobject:
        # Convert C values into a Python object, then type-test if needed.
        if not src.type.is_pyobject:
            if dst_type is bytes_type and src.type.is_int:
                src = CoerceIntToBytesNode(src, env)
            else:
                src = CoerceToPyTypeNode(src, env, type=dst_type)
        if not src.type.subtype_of(dst_type):
            if src.constant_result is not None:
                src = PyTypeTestNode(src, dst_type, env)
    elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
        # We let the compiler decide whether this is valid
        return src
    elif is_pythran_expr(src.type):
        if is_pythran_supported_type(dst_type):
            # Match the case were a pythran expr is assigned to a value, or vice versa.
            # We let the C++ compiler decide whether this is valid or not!
            return src
        # Else, we need to convert the Pythran expression to a Python object
        src = CoerceToPyTypeNode(src, env, type=dst_type)
    elif src.type.is_pyobject:
        if used_as_reference and dst_type.is_cpp_class:
            warning(
                self.pos,
                "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
        src = CoerceFromPyTypeNode(dst_type, src, env)
    elif (dst_type.is_complex
          and src_type != dst_type
          and dst_type.assignable_from(src_type)):
        src = CoerceToComplexNode(src, dst_type, env)
    else:
        # neither src nor dst are py types
        # Qualified cypclass types can have nontrivial aliasing rules
        # meaning type equality may not imply the coercion is valid.
        if src_type.is_qualified_cyp_class and not dst_type.assignable_from(src_type):
            self.fail_assignment(dst_type)
        # Added the string comparison, since for c types that
        # is enough, but Cython gets confused when the types are
        # in different pxi files.
        # TODO: Remove this hack and require shared declarations.
        elif not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
            self.fail_assignment(dst_type)
    return src
def fail_assignment(self, dst_type):
    # Report the generic "cannot assign" error for self.type -> dst_type.
    error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
    """Report a coercion error for self.type -> dst_type, if there is one.

    Returns True when an error was reported: either a specific message
    from the coercion-error table, or - with fail=True - the generic
    assignment failure.  Returns False otherwise.
    """
    if fail and not default:
        default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
    message = find_coercion_error((self.type, dst_type), default, env)
    if message is None:
        if not fail:
            return False
        self.fail_assignment(dst_type)
        return True
    error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
    return True
def coerce_to_pyobject(self, env):
    # Convenience wrapper: coerce to a generic Python object.
    return self.coerce_to(PyrexTypes.py_object_type, env)
def coerce_to_boolean(self, env):
    # Coerce result to something acceptable as
    # a boolean value.

    # if it's constant, calculate the result now
    if self.has_constant_result():
        bool_value = bool(self.constant_result)
        return BoolNode(self.pos, value=bool_value,
                        constant_result=bool_value)

    type = self.type
    if type.is_enum or type.is_error:
        return self
    elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
        return CoerceToBooleanNode(self, env)
    elif type.is_cpp_class:
        if type.is_cyp_class:
            has_bool_operator = type.scope.lookup_here("operator bool")
            # let cypclass truth-testing fall back on the value of the pointer
            if not has_bool_operator:
                return CoerceToBooleanNode(self, env)
        # C++ classes are truth-tested via their "operator bool".
        return SimpleCallNode(
            self.pos,
            function=AttributeNode(
                self.pos, obj=self, attribute='operator bool'),
            args=[]).analyse_types(env)
    elif type.is_ctuple:
        # NOTE(review): this evaluates to True only for an *empty* ctuple,
        # which looks inverted w.r.t. Python tuple truthiness - confirm.
        bool_value = len(type.components) == 0
        return BoolNode(self.pos, value=bool_value,
                        constant_result=bool_value)
    else:
        error(self.pos, "Type '%s' not acceptable as a boolean" % type)
        return self
def coerce_to_integer(self, env):
    """Return a node of C integer type, coercing to C long if needed."""
    if self.type.is_int:
        # Already some C integer type - nothing to do.
        return self
    return self.coerce_to(PyrexTypes.c_long_type, env)
def coerce_to_temp(self, env):
    """Ensure the result lives in a temporary, wrapping if necessary."""
    if self.result_in_temp():
        return self
    return CoerceToTempNode(self, env)
def coerce_to_simple(self, env):
    """Ensure the result is simple (see is_simple), via a temp if not."""
    return self if self.is_simple() else self.coerce_to_temp(env)
def is_simple(self):
    """A node is simple if its result is something that can be referred
    to without performing any operations, e.g. a constant, local var,
    C global var, struct member reference, or temporary."""
    return self.result_in_temp()
def may_be_none(self):
    """Can this expression evaluate to None at runtime?

    Only Python objects and memoryview slices can hold None; for a
    known constant the answer is exact, otherwise assume it might.
    """
    node_type = self.type
    if node_type and not node_type.is_pyobject \
            and not node_type.is_memoryviewslice:
        return False
    if not self.has_constant_result():
        return True
    return self.constant_result is not None
def as_cython_attribute(self):
    # Default: this expression does not name a "cython.*" attribute.
    return None
def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
    """Wrap the node in a NoneCheckNode unless it is known to be
    not-None (e.g. because it is a Python literal)."""
    if not self.may_be_none():
        return self
    return NoneCheckNode(self, error, message, format_args)
@classmethod
def from_node(cls, node, **kwargs):
    """Instantiate this node class from another node, properly
    copying over all attributes that one would forget otherwise.

    Explicit keyword arguments always take precedence over values
    copied from `node`; attributes missing on `node` are skipped.
    """
    _missing = object()
    for attr_name in ("cf_state", "cf_maybe_null", "cf_is_null",
                      "constant_result"):
        if attr_name in kwargs:
            continue
        value = getattr(node, attr_name, _missing)
        if value is not _missing:
            kwargs[attr_name] = value
    return cls(node.pos, **kwargs)
class AtomicExprNode(ExprNode):
    # Abstract base class for expression nodes which have
    # no sub-expressions.

    subexprs = []

    # Override to optimize -- we know we have no children
    def generate_subexpr_evaluation_code(self, code):
        pass

    def generate_subexpr_disposal_code(self, code):
        pass
class PyConstNode(AtomicExprNode):
    # Abstract base class for constant Python values.

    is_literal = 1
    type = py_object_type
    nogil_check = None

    def is_simple(self):
        return 1

    def may_be_none(self):
        # Overridden by NoneNode; other Python constants are never None.
        return False

    def analyse_types(self, env):
        return self

    def calculate_result_code(self):
        # The result is the fixed C-level name stored in self.value.
        return self.value

    def generate_result_code(self, code):
        pass
class NoneNode(PyConstNode):
    # The constant value None

    is_none = 1
    value = "Py_None"
    constant_result = None

    def compile_time_value(self, denv):
        return None

    def may_be_none(self):
        return True

    def coerce_to(self, dst_type, env):
        if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
            # Catch this error early and loudly.
            error(self.pos, "Cannot assign None to %s" % dst_type)
        return super(NoneNode, self).coerce_to(dst_type, env)
class EllipsisNode(PyConstNode):
    # '...' in a subscript list.

    value = "Py_Ellipsis"
    constant_result = Ellipsis

    def compile_time_value(self, denv):
        return Ellipsis
class ConstNode(AtomicExprNode):
    # Abstract base type for literal constant nodes.
    #
    # value     string      C code fragment

    is_literal = 1
    nogil_check = None

    def is_simple(self):
        return 1

    def nonlocally_immutable(self):
        return 1

    def may_be_none(self):
        return False

    def analyse_types(self, env):
        return self  # Types are held in class variables

    def check_const(self):
        return True

    def get_constant_c_result_code(self):
        return self.calculate_result_code()

    def calculate_result_code(self):
        return str(self.value)

    def generate_result_code(self, code):
        pass
class BoolNode(ConstNode):
    type = PyrexTypes.c_bint_type
    # The constant value True or False

    def calculate_constant_result(self):
        self.constant_result = self.value

    def compile_time_value(self, denv):
        return self.value

    def calculate_result_code(self):
        # Emit Py_True/Py_False for Python-level bools, 1/0 for C bint.
        if self.type.is_pyobject:
            return 'Py_True' if self.value else 'Py_False'
        else:
            return str(int(self.value))

    def coerce_to(self, dst_type, env):
        if dst_type == self.type:
            return self
        if dst_type is py_object_type and self.type is Builtin.bool_type:
            return self
        if dst_type.is_pyobject and self.type.is_int:
            # Same literal, retyped as a Python bool (new node instance).
            return BoolNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=Builtin.bool_type)
        if dst_type.is_int and self.type.is_pyobject:
            return BoolNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=PyrexTypes.c_bint_type)
        return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
    # The C NULL pointer constant.
    type = PyrexTypes.c_null_ptr_type
    value = "NULL"
    constant_result = 0

    def get_constant_c_result_code(self):
        return self.value
class CharNode(ConstNode):
    # A single C character literal.
    type = PyrexTypes.c_char_type

    def calculate_constant_result(self):
        self.constant_result = ord(self.value)

    def compile_time_value(self, denv):
        return ord(self.value)

    def calculate_result_code(self):
        # Emit as an escaped C character literal.
        return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
    # Integer literal.
    #
    # unsigned     ""          or "U"
    # longness     ""          or "L" or "LL"
    # is_c_literal   True/False/None   creator considers this a C integer literal

    unsigned = ""
    longness = ""
    is_c_literal = None  # unknown

    def __init__(self, pos, **kwds):
        ExprNode.__init__(self, pos, **kwds)
        # Only infer a type if the creator did not fix one explicitly.
        if 'type' not in kwds:
            self.type = self.find_suitable_type_for_value()

    def find_suitable_type_for_value(self):
        """Pick a C or Python type for this literal based on its value,
        its U/L/LL suffixes and any pre-set type."""
        if self.constant_result is constant_value_not_set:
            try:
                self.calculate_constant_result()
            except ValueError:
                pass
        # we ignore 'is_c_literal = True' and instead map signed 32bit
        # integers as C long values
        if self.is_c_literal or \
                not self.has_constant_result() or \
                self.unsigned or self.longness == 'LL':
            # clearly a C literal
            rank = (self.longness == 'LL') and 2 or 1
            suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
            if self.type:
                suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
        else:
            # C literal or Python literal - split at 32bit boundary
            if -2**31 <= self.constant_result < 2**31:
                if self.type and self.type.is_int:
                    suitable_type = self.type
                else:
                    suitable_type = PyrexTypes.c_long_type
            else:
                # Too large for a portable C long => keep as Python int.
                suitable_type = PyrexTypes.py_object_type
        return suitable_type

    def coerce_to(self, dst_type, env):
        if self.type is dst_type:
            return self
        elif dst_type.is_float:
            if self.has_constant_result():
                return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
                                 constant_result=float(self.constant_result))
            else:
                return FloatNode(self.pos, value=self.value, type=dst_type,
                                 constant_result=not_a_constant)
        if dst_type.is_numeric and not dst_type.is_complex:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type=dst_type, is_c_literal=True,
                           unsigned=self.unsigned, longness=self.longness)
            return node
        elif dst_type.is_pyobject:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type=PyrexTypes.py_object_type, is_c_literal=False,
                           unsigned=self.unsigned, longness=self.longness)
        else:
            # FIXME: not setting the type here to keep it working with
            # complex numbers. Should they be special cased?
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           unsigned=self.unsigned, longness=self.longness)
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)

    def coerce_to_boolean(self, env):
        # Retype the literal itself as a C bint (new node instance).
        return IntNode(
            self.pos, value=self.value,
            constant_result=self.constant_result,
            type=PyrexTypes.c_bint_type,
            unsigned=self.unsigned, longness=self.longness)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            # pre-allocate a Python version of the number
            plain_integer_string = str(Utils.str_to_number(self.value))
            self.result_code = code.get_py_int(plain_integer_string, self.longness)
        else:
            self.result_code = self.get_constant_c_result_code()

    def get_constant_c_result_code(self):
        unsigned, longness = self.unsigned, self.longness
        literal = self.value_as_c_integer_string()
        if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0':
            # negative decimal literal => guess longness from type to prevent wrap-around
            if self.type.rank >= PyrexTypes.c_longlong_type.rank:
                longness = 'LL'
            elif self.type.rank >= PyrexTypes.c_long_type.rank:
                longness = 'L'
        return literal + unsigned + longness

    def value_as_c_integer_string(self):
        """Return the literal text in a form a C compiler accepts."""
        value = self.value
        if len(value) <= 2:
            # too short to go wrong (and simplifies code below)
            return value
        neg_sign = ''
        if value[0] == '-':
            neg_sign = '-'
            value = value[1:]
        if value[0] == '0':
            literal_type = value[1]  # 0'o' - 0'b' - 0'x'
            # 0x123 hex literals and 0123 octal literals work nicely in C
            # but C-incompatible Py3 oct/bin notations need conversion
            if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit():
                # negative hex/octal literal => prevent C compiler from using
                # unsigned integer types by converting to decimal (see C standard 6.4.4.1)
                value = str(Utils.str_to_number(value))
            elif literal_type in 'oO':
                value = '0' + value[2:]  # '0o123' => '0123'
            elif literal_type in 'bB':
                value = str(int(value[2:], 2))
        elif value.isdigit() and not self.unsigned and not self.longness:
            if not neg_sign:
                # C compilers do not consider unsigned types for decimal literals,
                # but they do for hex (see C standard 6.4.4.1)
                value = '0x%X' % int(value)
        return neg_sign + value

    def calculate_result_code(self):
        return self.result_code

    def calculate_constant_result(self):
        self.constant_result = Utils.str_to_number(self.value)

    def compile_time_value(self, denv):
        return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
    # A C double literal.
    type = PyrexTypes.c_double_type

    def calculate_constant_result(self):
        self.constant_result = float(self.value)

    def compile_time_value(self, denv):
        return float(self.value)

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject and self.type.is_float:
            # Retype as a Python float (new node instance).
            return FloatNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=Builtin.float_type)
        if dst_type.is_float and self.type.is_pyobject:
            return FloatNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=dst_type)
        return ConstNode.coerce_to(self, dst_type, env)

    def calculate_result_code(self):
        return self.result_code

    def get_constant_c_result_code(self):
        strval = self.value
        assert isinstance(strval, basestring)
        # Normalise via repr(float(...)) to detect nan/inf spellings.
        cmpval = repr(float(strval))
        if cmpval == 'nan':
            return "(Py_HUGE_VAL * 0)"
        elif cmpval == 'inf':
            return "Py_HUGE_VAL"
        elif cmpval == '-inf':
            return "(-Py_HUGE_VAL)"
        else:
            return strval

    def generate_evaluation_code(self, code):
        c_value = self.get_constant_c_result_code()
        if self.type.is_pyobject:
            # Pre-allocate a Python float constant.
            self.result_code = code.get_py_float(self.value, c_value)
        else:
            self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
    """Try to interpret the string `name` as a type.

    Resolution order: basic C/Python type names first, then type entries
    in the module's global scope, and finally a full parse of
    "sizeof(name)" with errors suppressed.  Returns the resolved type,
    or None when `name` does not denote a type.
    """
    basic_type = PyrexTypes.parse_basic_type(name)
    if basic_type is not None:
        return basic_type

    entry = env.global_scope().lookup(name)
    if entry and entry.is_type and entry.type:
        return entry.type

    from .TreeFragment import TreeFragment
    with local_errors(ignore=True):
        # Shift the reported column back over the "sizeof(" prefix
        # (7 characters) so positions refer to the original name.
        pos = (pos[0], pos[1], pos[2] - 7)
        try:
            declaration = TreeFragment(
                u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
        except CompileError:
            pass
        else:
            sizeof_node = declaration.root.stats[0].expr
            if isinstance(sizeof_node, SizeofTypeNode):
                sizeof_node = sizeof_node.analyse_types(env)
            if isinstance(sizeof_node, SizeofTypeNode):
                return sizeof_node.arg_type
    return None
class BytesNode(ConstNode):
    # A char* or bytes literal
    #
    # value      BytesLiteral

    is_string_literal = True
    # start off as Python 'bytes' to support len() in O(1)
    type = bytes_type

    def calculate_constant_result(self):
        self.constant_result = self.value

    def as_sliced_node(self, start, stop, step=None):
        # Build a new literal node for self.value[start:stop:step].
        value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
        return BytesNode(self.pos, value=value, constant_result=value)

    def compile_time_value(self, denv):
        return self.value.byteencode()

    def analyse_as_type(self, env):
        return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)

    def can_coerce_to_char_literal(self):
        return len(self.value) == 1

    def coerce_to_boolean(self, env):
        # This is special because testing a C char* for truth directly
        # would yield the wrong result.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)

    def coerce_to(self, dst_type, env):
        if self.type == dst_type:
            return self

        if dst_type.is_int:
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character string literals can be coerced into ints.")
                return self
            if dst_type.is_unicode_char:
                error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
                return self
            return CharNode(self.pos, value=self.value,
                            constant_result=ord(self.value))

        # Coercion changes the type => always work on a fresh node.
        node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
        if dst_type.is_pyobject:
            if dst_type in (py_object_type, Builtin.bytes_type):
                node.type = Builtin.bytes_type
            else:
                self.check_for_coercion_error(dst_type, env, fail=True)
            return node
        elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
            node.type = dst_type
            return node
        elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
            node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type
                         else PyrexTypes.c_char_ptr_type)
            return CastNode(node, dst_type)
        elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
            # Exclude the case of passing a C string literal into a non-const C++ string.
            if not dst_type.is_cpp_class or dst_type.is_const:
                node.type = dst_type
                return node

        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            result = code.get_py_string_const(self.value)
        elif self.type.is_const:
            result = code.get_string_const(self.value)
        else:
            # not const => use plain C string literal and cast to mutable type
            literal = self.value.as_c_string_literal()
            # C++ may require a cast
            result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
        self.result_code = result

    def get_constant_c_result_code(self):
        return None  # FIXME

    def calculate_result_code(self):
        return self.result_code
class UnicodeNode(ConstNode):
    # A Py_UNICODE* or unicode literal
    #
    # value        EncodedString
    # bytes_value  BytesLiteral    the literal parsed as bytes string
    #                              ('-3' unicode literals only)

    is_string_literal = True
    bytes_value = None
    type = unicode_type

    def calculate_constant_result(self):
        self.constant_result = self.value

    def analyse_as_type(self, env):
        return _analyse_name_as_type(self.value, self.pos, env)

    def as_sliced_node(self, start, stop, step=None):
        if StringEncoding.string_contains_surrogates(self.value[:stop]):
            # this is unsafe as it may give different results
            # in different runtimes
            return None
        value = StringEncoding.EncodedString(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.bytes_value is not None:
            # Keep the parallel bytes form of a '-3' literal in sync.
            bytes_value = StringEncoding.bytes_literal(
                self.bytes_value[start:stop:step], self.bytes_value.encoding)
        else:
            bytes_value = None
        return UnicodeNode(
            self.pos, value=value, bytes_value=bytes_value,
            constant_result=value)

    def coerce_to(self, dst_type, env):
        if dst_type is self.type:
            pass
        elif dst_type.is_unicode_char:
            if not self.can_coerce_to_char_literal():
                error(self.pos,
                      "Only single-character Unicode string literals or "
                      "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
                return self
            int_value = ord(self.value)
            return IntNode(self.pos, type=dst_type, value=str(int_value),
                           constant_result=int_value)
        elif not dst_type.is_pyobject:
            if dst_type.is_string and self.bytes_value is not None:
                # special case: '-3' enforced unicode literal used in a
                # C char* context
                return BytesNode(self.pos, value=self.bytes_value).coerce_to(dst_type, env)
            if dst_type.is_pyunicode_ptr:
                return UnicodeNode(self.pos, value=self.value, type=dst_type)
            error(self.pos,
                  "Unicode literals do not support coercion to C types other "
                  "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
                  "(for strings).")
        elif dst_type not in (py_object_type, Builtin.basestring_type):
            self.check_for_coercion_error(dst_type, env, fail=True)
        return self

    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
        ## or (len(self.value) == 2
        ##     and (0xD800 <= self.value[0] <= 0xDBFF)
        ##     and (0xDC00 <= self.value[1] <= 0xDFFF))

    def coerce_to_boolean(self, env):
        # Constant literal: truthiness is known at compile time.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)

    def contains_surrogates(self):
        return StringEncoding.string_contains_surrogates(self.value)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            # FIXME: this should go away entirely!
            # Since string_contains_lone_surrogates() returns False for surrogate pairs in Py2/UCS2,
            # Py2 can generate different code from Py3 here. Let's hope we get away with claiming that
            # the processing of surrogate pairs in code was always ambiguous and lead to different results
            # on P16/32bit Unicode platforms.
            if StringEncoding.string_contains_lone_surrogates(self.value):
                # lone (unpaired) surrogates are not really portable and cannot be
                # decoded by the UTF-8 codec in Py3.3
                self.result_code = code.get_py_const(py_object_type, 'ustring')
                data_cname = code.get_string_const(
                    StringEncoding.BytesLiteral(self.value.encode('unicode_escape')))
                const_code = code.get_cached_constants_writer(self.result_code)
                if const_code is None:
                    return  # already initialised
                const_code.mark_pos(self.pos)
                const_code.putln(
                    "%s = PyUnicode_DecodeUnicodeEscape(%s, sizeof(%s) - 1, NULL); %s" % (
                        self.result_code,
                        data_cname,
                        data_cname,
                        const_code.error_goto_if_null(self.result_code, self.pos)))
                const_code.put_error_if_neg(
                    self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
            else:
                self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_pyunicode_ptr_const(self.value)

    def calculate_result_code(self):
        return self.result_code

    def compile_time_value(self, env):
        return self.value
class StringNode(PyConstNode):
    # A Python str object, i.e. a byte string in Python 2.x and a
    # unicode string in Python 3.x
    #
    # value          BytesLiteral (or EncodedString with ASCII content)
    # unicode_value  EncodedString or None
    # is_identifier  boolean

    type = str_type
    is_string_literal = True
    is_identifier = None
    unicode_value = None

    def calculate_constant_result(self):
        if self.unicode_value is not None:
            # only the Unicode value is portable across Py2/3
            self.constant_result = self.unicode_value

    def analyse_as_type(self, env):
        return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)

    def as_sliced_node(self, start, stop, step=None):
        value = type(self.value)(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.unicode_value is not None:
            if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
                # this is unsafe as it may give different results in different runtimes
                return None
            unicode_value = StringEncoding.EncodedString(
                self.unicode_value[start:stop:step])
        else:
            unicode_value = None
        return StringNode(
            self.pos, value=value, unicode_value=unicode_value,
            constant_result=value, is_identifier=self.is_identifier)

    def coerce_to(self, dst_type, env):
        if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
            # if dst_type is Builtin.bytes_type:
            #     # special case: bytes = 'str literal'
            #     return BytesNode(self.pos, value=self.value)
            if not dst_type.is_pyobject:
                # C targets go through the bytes representation.
                return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
            if dst_type is not Builtin.basestring_type:
                self.check_for_coercion_error(dst_type, env, fail=True)
        return self

    def can_coerce_to_char_literal(self):
        return not self.is_identifier and len(self.value) == 1

    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(
            self.value, identifier=self.is_identifier, is_str=True,
            unicode_value=self.unicode_value)

    def get_constant_c_result_code(self):
        return None

    def calculate_result_code(self):
        return self.result_code

    def compile_time_value(self, env):
        if self.value.is_unicode:
            return self.value
        if not IS_PYTHON3:
            # use plain str/bytes object in Py2
            return self.value.byteencode()
        # in Py3, always return a Unicode string
        if self.unicode_value is not None:
            return self.unicode_value
        return self.value.decode('iso8859-1')
class IdentifierStringNode(StringNode):
    # A special str value that represents an identifier (bytes in Py2,
    # unicode in Py3).
    is_identifier = True
class ImagNode(AtomicExprNode):
    # Imaginary number literal
    #
    # value   string    imaginary part (float value)

    type = PyrexTypes.c_double_complex_type

    def calculate_constant_result(self):
        self.constant_result = complex(0.0, float(self.value))

    def compile_time_value(self, denv):
        return complex(0.0, float(self.value))

    def analyse_types(self, env):
        # Make sure the complex type's declaration helpers are available.
        self.type.create_declaration_utility_code(env)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if self.type is dst_type:
            return self
        node = ImagNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            node.is_temp = 1
            node.type = Builtin.complex_type
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return AtomicExprNode.coerce_to(node, dst_type, env)

    gil_message = "Constructing complex number"

    def calculate_result_code(self):
        if self.type.is_pyobject:
            return self.result()
        else:
            # C complex: build from (real, imag) parts.
            return "%s(0, %r)" % (self.type.from_parts, float(self.value))

    def generate_result_code(self, code):
        if self.type.is_pyobject:
            code.putln(
                "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
                    self.result(),
                    float(self.value),
                    code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)
        elif self.type.is_cyp_class:
            self.generate_gotref(code)
class NewExprNode(AtomicExprNode):
    # C++ new statement
    #
    # cppclass   node   c++ class to create

    type = None

    def infer_type(self, env):
        type = self.cppclass.analyse_as_type(env)
        if type is None or not type.is_cpp_class:
            error(self.pos, "new operator can only be applied to a C++ class")
            self.type = error_type
            return
        self.cpp_check(env)
        if type.is_cyp_class:
            # New cypclass instances start with the 'iso~' qualifier.
            type = PyrexTypes.cyp_class_qualified_type(type, 'iso~')
        constructor = type.get_constructor(self.pos)
        self.class_type = type
        self.entry = constructor
        # The node's type is the constructor's (function) type.
        self.type = constructor.type
        return self.type

    def analyse_types(self, env):
        if self.type is None:
            self.infer_type(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        pass

    def calculate_result_code(self):
        return "new " + self.class_type.empty_declaration_code()
class NameNode(AtomicExprNode):
    # Reference to a local or global variable name.
    #
    # name           string    Python name of the variable
    # entry          Entry     Symbol table entry
    # type_entry     Entry     For extension type names, the original type entry
    # cf_is_null     boolean   Is uninitialized before this node
    # cf_maybe_null  boolean   Maybe uninitialized before this node
    # allow_null     boolean   Don't raise UnboundLocalError
    # nogil          boolean   Whether it is used in a nogil context

    is_name = True
    is_cython_module = False
    cython_attribute = None
    lhs_of_first_assignment = False  # TODO: remove me
    is_used_as_rvalue = 0
    entry = None
    type_entry = None
    cf_maybe_null = True
    cf_is_null = False
    allow_null = False
    nogil = False
    # Type assigned by type inference, used when the entry is unresolved.
    inferred_type = None
def as_cython_attribute(self):
    # A name may directly refer to a "cython.*" attribute.
    return self.cython_attribute
def type_dependencies(self, env):
    """Return the nodes whose types must be inferred before this one.

    A name bound to an entry whose type is still unspecified depends
    on itself being resolved; otherwise there are no dependencies.
    """
    if self.entry is None:
        # Late lookup: the binding may not have been resolved yet.
        self.entry = env.lookup(self.name)
    entry = self.entry
    if entry is not None and entry.type.is_unspecified:
        return (self,)
    return ()
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is None or self.entry.type is unspecified_type:
if self.inferred_type is not None:
return self.inferred_type
return py_object_type
elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
self.name == self.entry.type.name:
# Unfortunately the type attribute of type objects
# is used for the pointer to the type they represent.
return type_type
elif self.entry.type.is_cfunction:
if self.entry.scope.is_builtin_scope:
# special case: optimised builtin functions must be treated as Python objects
return py_object_type
else:
# special case: referring to a C function must return its pointer
return PyrexTypes.CPtrType(self.entry.type)
else:
# If entry is inferred as pyobject it's safe to use local
# NameNode's inferred_type.
if self.entry.type.is_pyobject and self.inferred_type:
# Overflow may happen if integer
if not (self.inferred_type.is_int and self.entry.might_overflow):
return self.inferred_type
return self.entry.type
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
except KeyError:
error(self.pos, "Compile-time name '%s' not defined" % self.name)
def get_constant_c_result_code(self):
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
# referring to the Python builtin.
#print "NameNode.coerce_to:", self.name, dst_type ###
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction:
var_entry = entry.as_variable
if var_entry:
if var_entry.is_builtin and var_entry.is_const:
var_entry = env.declare_builtin(var_entry.name, self.pos)
node = NameNode(self.pos, name = self.name)
node.entry = var_entry
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
def declare_from_annotation(self, env, as_target=False):
"""Implements PEP 526 annotation typing in a fairly relaxed way.
Annotations are ignored for global variables, Python class attributes and already declared variables.
String literals are allowed and ignored.
The ambiguous Python types 'int' and 'long' are ignored and the 'cython.int' form must be used instead.
"""
if not env.directives['annotation_typing']:
return
if env.is_module_scope or env.is_py_class_scope:
# annotations never create global cdef names and Python classes don't support them anyway
return
name = self.name
if self.entry or env.lookup_here(name) is not None:
# already declared => ignore annotation
return
annotation = self.annotation
if annotation.expr.is_string_literal:
# name: "description" => not a type, but still a declared variable or attribute
atype = None
else:
_, atype = annotation.analyse_type_annotation(env)
if atype is None:
atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
if atype.is_fused and env.fused_to_specific:
atype = atype.specialize(env.fused_to_specific)
self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
self.entry.annotation = annotation.expr
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.as_module:
return entry.as_module
return None
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
else:
type = PyrexTypes.parse_basic_type(self.name)
if type:
return type
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
return entry.type
else:
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
if not self.entry and self.annotation is not None:
# name : type = ...
self.declare_from_annotation(env, as_target=True)
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
if env.directives['infer_types'] != False:
type = unspecified_type
else:
type = py_object_type
self.entry = env.declare_var(self.name, type, self.pos)
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
if self.entry.as_module:
# cimported modules namespace can shadow actual variables
self.entry.is_variable = 1
def analyse_types(self, env):
self.initialized_check = env.directives['initializedcheck']
entry = self.entry
if entry is None:
entry = env.lookup(self.name)
if not entry:
entry = env.declare_builtin(self.name, self.pos)
if entry and entry.is_builtin and entry.is_const:
self.is_literal = True
if not entry:
self.type = PyrexTypes.error_type
return self
self.entry = entry
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
self.analyse_rvalue_entry(env)
return self
def analyse_target_types(self, env):
self.analyse_entry(env, is_target=True)
entry = self.entry
if entry.is_cfunction and entry.as_variable:
# FIXME: unify "is_overridable" flags below
if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction:
# We need this for assigning to cpdef names and for the fused 'def' TreeFragment
entry = self.entry = entry.as_variable
self.type = entry.type
if self.type.is_const:
error(self.pos, "Assignment to const '%s'" % self.name)
if self.type.is_reference:
error(self.pos, "Assignment to reference '%s'" % self.name)
if not self.is_lvalue():
error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
self.type = PyrexTypes.error_type
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
return self
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
self.analyse_entry(env)
entry = self.entry
if entry.is_declared_generic:
self.result_ctype = py_object_type
if entry.is_pyglobal or entry.is_builtin:
if entry.is_builtin and entry.is_const:
self.is_temp = 0
else:
self.is_temp = 1
self.is_used_as_rvalue = 1
elif entry.type.is_memoryviewslice:
self.is_temp = False
self.is_used_as_rvalue = True
self.use_managed_ref = True
return self
def nogil_check(self, env):
self.nogil = True
if self.is_used_as_rvalue:
entry = self.entry
if entry.is_builtin:
if not entry.is_const: # cached builtins are ok
self.gil_error()
elif entry.is_pyglobal:
self.gil_error()
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env, is_target=False):
#print "NameNode.analyse_entry:", self.name ###
self.check_identifier_kind()
entry = self.entry
type = entry.type
if (not is_target and type.is_pyobject and self.inferred_type and
self.inferred_type.is_builtin_type):
# assume that type inference is smarter than the static entry
type = self.inferred_type
self.type = type
def check_identifier_kind(self):
# Check that this is an appropriate kind of name for use in an
# expression. Also finds the variable entry associated with
# an extension type.
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
if entry.is_type and (entry.type.is_enum or entry.type.is_cpp_enum):
py_entry = Symtab.Entry(self.name, None, py_object_type)
py_entry.is_pyglobal = True
py_entry.scope = self.entry.scope
self.entry = py_entry
elif not (entry.is_const or entry.is_variable or
entry.is_builtin or entry.is_cfunction or
entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
elif not self.is_cython_module:
error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name)
def is_cimported_module_without_shadow(self, env):
if self.is_cython_module or self.cython_attribute:
return False
entry = self.entry or env.lookup(self.name)
return entry.as_module and not entry.is_variable
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
def may_be_none(self):
if self.cf_state and self.type and (self.type.is_pyobject or
self.type.is_memoryviewslice):
# guard against infinite recursion on self-dependencies
if getattr(self, '_none_checking', False):
# self-dependency - either this node receives a None
# value from *another* node, or it can not reference
# None at this point => safe to assume "not None"
return False
self._none_checking = True
# evaluate control flow state to see if there were any
# potential None values assigned to the node so far
may_be_none = False
for assignment in self.cf_state:
if assignment.rhs.may_be_none():
may_be_none = True
break
del self._none_checking
return may_be_none
return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
if ExprNode.nonlocally_immutable(self):
return True
entry = self.entry
if not entry or entry.in_closure:
return False
return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
def calculate_target_results(self, env):
pass
def check_const(self):
entry = self.entry
if entry is not None and not (
entry.is_const or
entry.is_cfunction or
entry.is_builtin or
entry.type.is_const):
self.not_const()
return False
return True
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
self.addr_not_const()
return False
return True
def is_lvalue(self):
return (
self.entry.is_variable and
not self.entry.is_readonly
) or (
self.entry.is_cfunction and
self.entry.is_overridable
)
def is_addressable(self):
return self.entry.is_variable and not self.type.is_memoryviewslice
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
def calculate_result_code(self):
entry = self.entry
if not entry:
return "" # There was an error earlier
return entry.cname
def generate_result_code(self, code):
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.utility_code:
code.globalstate.use_utility_code(entry.utility_code)
if entry.is_builtin and entry.is_const:
return # Lookup already cached
elif entry.is_pyclass_attr:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.is_builtin:
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.scope.namespace_cname
if not self.cf_is_null:
code.putln(
'%s = PyObject_GetItem(%s, %s);' % (
self.result(),
namespace,
interned_cname))
code.putln('if (unlikely(!%s)) {' % self.result())
code.putln('PyErr_Clear();')
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'__Pyx_GetModuleGlobalName(%s, %s);' % (
self.result(),
interned_cname))
if not self.cf_is_null:
code.putln("}")
code.putln(code.error_goto_if_null(self.result(), self.pos))
self.generate_gotref(code)
elif entry.is_builtin and not entry.scope.is_module_scope:
# known builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetBuiltinName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
# name in class body, global name or unknown builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'__Pyx_GetModuleGlobalName(%s, %s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
else:
# FIXME: is_pyglobal is also used for class namespace
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
code.putln(
'__Pyx_GetNameInClass(%s, %s, %s); %s' % (
self.result(),
entry.scope.namespace_cname,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
elif entry.type.is_cyp_class:
self.generate_gotref(code)
#pass
# code.putln(entry.cname)
elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
# Raise UnboundLocalError for objects and memoryviewslices
raise_unbound = (
(self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
null_code = entry.type.check_for_null_code(entry.cname)
memslice_check = entry.type.is_memoryviewslice and self.initialized_check
if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
#print "NameNode.generate_assignment_code:", self.name ###
entry = self.entry
if entry is None:
return # There was an error earlier
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment and not rhs.in_module_scope):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
# is_pyglobal seems to be True for module level-globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
namespace = self.entry.scope.namespace_cname
if entry.is_member:
# if the entry is a member we have to cheat: SetAttr does not work
# on types, so we create a descriptor which is then added to tp_dict
setter = 'PyDict_SetItem'
namespace = '%s->tp_dict' % namespace
elif entry.scope.is_module_scope:
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
# Special-case setting __new__
n = "SetNewInClass" if self.name == "__new__" else "SetNameInClass"
code.globalstate.use_utility_code(UtilityCode.load_cached(n, "ObjectHandling.c"))
setter = '__Pyx_' + n
else:
assert False, repr(entry)
code.put_error_if_neg(
self.pos,
'%s(%s, %s, %s)' % (
setter,
namespace,
interned_cname,
rhs.py_result()))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating disposal code for %s" % rhs)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
if entry.is_member:
# in Py2.6+, we need to invalidate the method cache
code.putln("PyType_Modified(%s);" %
entry.scope.parent_type.typeptr_cname)
else:
if self.type.is_memoryviewslice:
self.generate_acquire_memoryviewslice(rhs, code)
elif self.type.is_buffer:
# Generate code for doing the buffer release/acquisition.
# This might raise an exception in which case the assignment (done
# below) will not happen.
#
# The reason this is not in a typetest-like node is because the
# variables that the acquired buffer info is stored to is allocated
# per entry and coupled with it.
self.generate_acquire_buffer(rhs, code)
assigned = False
if self.type.is_pyobject:
#print "NameNode.generate_assignment_code: to", self.name ###
#print "...from", rhs ###
#print "...LHS type", self.type, "ctype", self.ctype() ###
#print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
if self.use_managed_ref:
rhs.make_owned_reference(code)
is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
if is_external_ref:
self.generate_gotref(code, handle_null=True)
assigned = True
if entry.is_cglobal:
self.generate_decref_set(code, rhs.result_as(self.ctype()))
else:
if not self.cf_is_null:
if self.cf_maybe_null:
self.generate_xdecref_set(code, rhs.result_as(self.ctype()))
else:
self.generate_decref_set(code, rhs.result_as(self.ctype()))
else:
assigned = False
if is_external_ref:
rhs.generate_giveref(code)
elif self.type.is_cyp_class:
if self.use_managed_ref:
rhs.make_owned_reference(code)
code.put_xdecref(self.result(), self.type)
if not self.type.is_memoryviewslice:
if not assigned:
if overloaded_assignment:
result = rhs.move_result_rhs()
if exception_check == '+':
translate_cpp_exception(
code, self.pos,
'%s = %s;' % (self.result(), result),
self.result() if self.type.is_pyobject else None,
exception_value, self.in_nogil_context)
else:
code.putln('%s = %s;' % (self.result(), result))
else:
result = rhs.move_result_rhs_as(self.ctype())
if is_pythran_expr(self.type):
code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
elif result != self.result():
code.putln('%s = %s;' % (self.result(), result))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
rhs.generate_post_assignment_code(code)
elif rhs.result_in_temp():
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def generate_acquire_memoryviewslice(self, rhs, code):
"""
Slices, coercions from objects, return values etc are new references.
We have a borrowed reference in case of dst = src
"""
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=self.result(),
lhs_type=self.type,
lhs_pos=self.pos,
rhs=rhs,
code=code,
have_gil=not self.in_nogil_context,
first_assignment=self.cf_is_null)
def generate_acquire_buffer(self, rhs, code):
# rhstmp is only used in case the rhs is a complicated expression leading to
# the object, to avoid repeating the same C expression for every reference
# to the rhs. It does NOT hold a reference.
pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
if pretty_rhs:
rhstmp = rhs.result_as(self.ctype())
else:
rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
from . import Buffer
Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if self.entry is None:
return # There was an error earlier
elif self.entry.is_pyclass_attr:
namespace = self.entry.scope.namespace_cname
interned_cname = code.intern_identifier(self.entry.name)
if ignore_nonexisting:
key_error_code = 'PyErr_Clear(); else'
else:
# minor hack: fake a NameError on KeyError
key_error_code = (
'{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
self.entry.name)
code.putln(
'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
' %s '
'}' % (namespace, interned_cname,
key_error_code,
code.error_goto(self.pos)))
elif self.entry.is_pyglobal:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
interned_cname = code.intern_identifier(self.entry.name)
del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
Naming.module_cname, interned_cname)
if ignore_nonexisting:
code.putln(
'if (unlikely(%s < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s '
'}' % (del_code, code.error_goto(self.pos)))
else:
code.put_error_if_neg(self.pos, del_code)
elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice or self.entry.type.is_cyp_class:
if not self.cf_is_null:
if self.cf_maybe_null and not ignore_nonexisting:
code.put_error_if_unbound(self.pos, self.entry)
if self.entry.in_closure:
# generator
self.generate_gotref(code, handle_null=True, maybe_null_extra_check=ignore_nonexisting)
if ignore_nonexisting and self.cf_maybe_null:
code.put_xdecref_clear(self.result(), self.ctype(),
have_gil=not self.nogil)
else:
code.put_decref_clear(self.result(), self.ctype(),
have_gil=not self.nogil)
else:
error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if getattr(self, 'is_called', False):
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
style, text = 'py_call', 'python function (%s)'
else:
style, text = 'c_call', 'c function (%s)'
code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
    #  `expr`
    #
    #  arg   ExprNode   expression whose repr() is taken

    type = py_object_type
    subexprs = ['arg']

    gil_message = "Backquote expression"

    def analyse_types(self, env):
        # The operand must be a Python object; PyObject_Repr() returns
        # a new reference, so the result lives in a temp.
        arg = self.arg.analyse_types(env)
        self.arg = arg.coerce_to_pyobject(env)
        self.is_temp = 1
        return self

    def calculate_constant_result(self):
        # Fold at compile time when the operand is a known constant.
        self.constant_result = repr(self.arg.constant_result)

    def generate_result_code(self, code):
        result = self.result()
        code.putln(
            "%s = PyObject_Repr(%s); %s" % (
                result,
                self.arg.py_result(),
                code.error_goto_if_null(result, self.pos)))
        self.generate_gotref(code)
class ImportNode(ExprNode):
    #  Used as part of import statement implementation.
    #  Implements result =
    #    __import__(module_name, globals(), None, name_list, level)
    #
    #  module_name   StringNode            dotted name of module. Empty module
    #                       name means importing the parent package according
    #                       to level
    #  name_list     ListNode or None      list of names to be imported
    #  level         int                   relative import level:
    #                       -1: attempt both relative import and absolute import;
    #                        0: absolute import;
    #                       >0: the number of parent directories to search
    #                           relative to the current module.
    #                     None: decide the level according to language level and
    #                           directives
    #  get_top_level_module   int          true: return top-level module, false: return imported module
    #  module_names           TupleNode    the separate names of the module and submodules, or None

    type = py_object_type
    module_names = None
    get_top_level_module = False
    is_temp = True

    subexprs = ['module_name', 'name_list', 'module_names']

    def analyse_types(self, env):
        if self.level is None:
            # For modules in packages, and without 'absolute_import' enabled, try relative (Py2) import first.
            if env.global_scope().parent_module and (
                    env.directives['py2_import'] or
                    Future.absolute_import not in env.global_scope().context.future_directives):
                self.level = -1
            else:
                self.level = 0
        module_name = self.module_name.analyse_types(env)
        self.module_name = module_name.coerce_to_pyobject(env)
        assert self.module_name.is_string_literal
        if self.name_list:
            name_list = self.name_list.analyse_types(env)
            self.name_list = name_list.coerce_to_pyobject(env)
        elif '.' in self.module_name.value:
            # pre-split the dotted name so the generated code can walk
            # the submodule chain without re-parsing the string
            self.module_names = TupleNode(self.module_name.pos, args=[
                IdentifierStringNode(self.module_name.pos, value=part, constant_result=part)
                for part in map(StringEncoding.EncodedString, self.module_name.value.split('.'))
            ]).analyse_types(env)
        return self

    gil_message = "Python import"

    def generate_result_code(self, code):
        assert self.module_name.is_string_literal
        module_name = self.module_name.value

        if self.level == 0 and not self.name_list and not self.get_top_level_module:
            # plain absolute 'import a.b.c' => use the dotted-module helper
            if self.module_names:
                assert self.module_names.is_literal  # make sure we create the tuple only once
            code.globalstate.use_utility_code(UtilityCode.load_cached("ImportDottedModule", "ImportExport.c"))
            import_code = "__Pyx_ImportDottedModule(%s, %s)" % (
                self.module_name.py_result(),
                self.module_names.py_result() if self.module_names else 'NULL',
            )
        else:
            # generic __import__() path (relative imports, 'from' imports)
            code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
            import_code = "__Pyx_Import(%s, %s, %d)" % (
                self.module_name.py_result(),
                self.name_list.py_result() if self.name_list else '0',
                self.level)

        if self.level <= 0 and module_name in utility_code_for_imports:
            # some modules (e.g. cython internals) get a post-import hook
            helper_func, code_name, code_file = utility_code_for_imports[module_name]
            code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file))
            import_code = '%s(%s)' % (helper_func, import_code)

        code.putln("%s = %s; %s" % (
            self.result(),
            import_code,
            code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class IteratorNode(ExprNode):
    #  Used as part of for statement implementation.
    #
    #  Implements result = iter(sequence)
    #
    #  sequence   ExprNode

    type = py_object_type
    iter_func_ptr = None
    counter_cname = None
    reversed = False      # currently only used for list/tuple types (see Optimize.py)
    is_async = False

    subexprs = ['sequence']

    def analyse_types(self, env):
        # Dispatch on the sequence type: C arrays/pointers are handled by a
        # later transform, C++ containers by CppIteratorNode, everything
        # else goes through the generic Python iterator protocol.
        self.sequence = self.sequence.analyse_types(env)
        if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
                not self.sequence.type.is_string:
            # C array iteration will be transformed later on
            self.type = self.sequence.type
        elif self.sequence.type.is_cpp_class:
            return CppIteratorNode(self.pos, sequence=self.sequence).analyse_types(env)
        else:
            self.sequence = self.sequence.coerce_to_pyobject(env)
            if self.sequence.type in (list_type, tuple_type):
                self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
        self.is_temp = 1
        return self

    gil_message = "Iterating over Python object"

    # C function pointer type of a tp_iternext slot
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))

    def type_dependencies(self, env):
        return self.sequence.type_dependencies(env)

    def infer_type(self, env):
        sequence_type = self.sequence.infer_type(env)
        if sequence_type.is_array or sequence_type.is_ptr:
            return sequence_type
        elif sequence_type.is_cpp_class:
            begin = sequence_type.scope.lookup("begin")
            if begin is not None:
                return begin.type.return_type
        elif sequence_type.is_pyobject:
            return sequence_type
        return py_object_type

    def generate_result_code(self, code):
        # Emit the iterator setup.  For (possible) list/tuple sequences a
        # counter-based fast path is generated; otherwise (or additionally,
        # behind a runtime type check) PyObject_GetIter() is used.
        sequence_type = self.sequence.type
        if sequence_type.is_cpp_class:
            assert False, "Should have been changed to CppIteratorNode"
        if sequence_type.is_array or sequence_type.is_ptr:
            raise InternalError("for in carray slice not transformed")

        is_builtin_sequence = sequence_type in (list_type, tuple_type)
        if not is_builtin_sequence:
            # reversed() not currently optimised (see Optimize.py)
            assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
        self.may_be_a_sequence = not sequence_type.is_builtin_type
        if self.may_be_a_sequence:
            # runtime check: take the indexed fast path for exact list/tuple
            code.putln(
                "if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
                    self.sequence.py_result(),
                    self.sequence.py_result()))

        if is_builtin_sequence or self.may_be_a_sequence:
            self.counter_cname = code.funcstate.allocate_temp(
                PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            if self.reversed:
                if sequence_type is list_type:
                    init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
                else:
                    init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
            else:
                init_value = '0'
            code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
                self.result(),
                self.sequence.py_result(),
                self.result(),
                self.counter_cname,
                init_value))
        if not is_builtin_sequence:
            self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
            if self.may_be_a_sequence:
                # NULL iternext pointer marks "sequence fast path" at runtime
                code.putln("%s = NULL;" % self.iter_func_ptr)
                code.putln("} else {")
                code.put("%s = -1; " % self.counter_cname)

            code.putln("%s = PyObject_GetIter(%s); %s" % (
                self.result(),
                self.sequence.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)

            # PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
            # makes it visible to the C compiler that the pointer really isn't NULL, so that
            # it can distinguish between the special cases and the generic case
            code.putln("%s = __Pyx_PyObject_GetIterNextFunc(%s); %s" % (
                self.iter_func_ptr, self.py_result(),
                code.error_goto_if_null(self.iter_func_ptr, self.pos)))
        if self.may_be_a_sequence:
            code.putln("}")

    def generate_next_sequence_item(self, test_name, result_name, code):
        # Emit the per-item code of the indexed list/tuple fast path.
        # test_name is 'List' or 'Tuple' and selects the C macro family.
        assert self.counter_cname, "internal error: counter_cname temp not prepared"
        final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
        if self.sequence.is_sequence_constructor:
            # the sequence length is statically known => use it directly
            item_count = len(self.sequence.args)
            if self.sequence.mult_factor is None:
                final_size = item_count
            elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types):
                final_size = item_count * self.sequence.mult_factor.constant_result
        code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
        if self.reversed:
            inc_dec = '--'
        else:
            inc_dec = '++'
        code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
        code.putln(
            "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
                result_name,
                test_name,
                self.py_result(),
                self.counter_cname,
                result_name,
                self.counter_cname,
                inc_dec,
                # use the error label to avoid C compiler warnings if we only use it below
                code.error_goto_if_neg('0', self.pos)
                ))
        code.putln("#else")
        code.putln(
            "%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
                result_name,
                self.py_result(),
                self.counter_cname,
                self.counter_cname,
                inc_dec,
                code.error_goto_if_null(result_name, self.pos)))
        code.put_gotref(result_name, py_object_type)
        code.putln("#endif")

    def generate_iter_next_result_code(self, result_name, code):
        # Emit the loop-body code that fetches the next item (or breaks).
        sequence_type = self.sequence.type
        if self.reversed:
            code.putln("if (%s < 0) break;" % self.counter_cname)
        if sequence_type is list_type:
            self.generate_next_sequence_item('List', result_name, code)
            return
        elif sequence_type is tuple_type:
            self.generate_next_sequence_item('Tuple', result_name, code)
            return

        if self.may_be_a_sequence:
            # NULL iternext pointer => we took the sequence fast path
            code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
            code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
            self.generate_next_sequence_item('List', result_name, code)
            code.putln("} else {")
            self.generate_next_sequence_item('Tuple', result_name, code)
            code.putln("}")
            code.put("} else ")
        code.putln("{")
        code.putln(
            "%s = %s(%s);" % (
                result_name,
                self.iter_func_ptr,
                self.py_result()))
        code.putln("if (unlikely(!%s)) {" % result_name)
        # tp_iternext may return NULL without an exception (end of
        # iteration) or with StopIteration set; both end the loop
        code.putln("PyObject* exc_type = PyErr_Occurred();")
        code.putln("if (exc_type) {")
        code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
        code.putln("break;")
        code.putln("}")
        code.put_gotref(result_name, py_object_type)
        code.putln("}")

    def free_temps(self, code):
        # Release the loop-helper temps in addition to the usual subexpr temps.
        if self.counter_cname:
            code.funcstate.release_temp(self.counter_cname)
        if self.iter_func_ptr:
            code.funcstate.release_temp(self.iter_func_ptr)
            self.iter_func_ptr = None
        ExprNode.free_temps(self, code)
class CppIteratorNode(ExprNode):
# Iteration over a C++ container.
# Created at the analyse_types stage by IteratorNode
cpp_sequence_cname = None
cpp_attribute_op = "."
is_temp = True
subexprs = ['sequence']
def analyse_types(self, env):
sequence_type = self.sequence.type
if sequence_type.is_ptr:
sequence_type = sequence_type.base_type
if sequence_type.is_const or sequence_type.is_const_cyp_class:
begin = sequence_type.scope.lookup("const_begin")
end = sequence_type.scope.lookup("const_end")
if not begin or begin.cname != "begin":
begin = sequence_type.scope.lookup("begin")
if not end or end.cname != "end":
end = sequence_type.scope.lookup("end")
else:
begin = sequence_type.scope.lookup("begin")
end = sequence_type.scope.lookup("end")
if (begin is None
or not begin.type.is_cfunction
or begin.type.args):
error(self.pos, "missing begin() on %s" % self.sequence.type)
self.type = error_type
return self
if (end is None
or not end.type.is_cfunction
or end.type.args):
error(self.pos, "missing end() on %s" % self.sequence.type)
self.type = error_type
return self
iter_type = begin.type.return_type
if iter_type.is_cpp_class:
if env.lookup_operator_for_types(
self.pos,
"!=",
[iter_type, end.type.return_type]) is None:
error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
self.type = error_type
return self
if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
self.type = error_type
return self
if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
self.type = error_type
return self
self.type = iter_type
elif iter_type.is_ptr:
if not (iter_type == end.type.return_type):
error(self.pos, "incompatible types for begin() and end()")
self.type = iter_type
else:
error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
self.type = error_type
return self
    def generate_result_code(self, code):
        # Emit "iterator = <container>begin();", first storing the
        # container in a dedicated temp when it cannot safely be accessed
        # in place.
        sequence_type = self.sequence.type
        if sequence_type.is_cyp_class:
            # cypclass containers are held by pointer, so use "->".
            self.cpp_attribute_op = "->"
        # essentially 3 options:
        if self.sequence.is_simple():
            # 1) is a name and can be accessed directly;
            #    assigning to it may break the container, but that's the
            #    responsibility of the user
            code.putln("%s = %s%sbegin();" % (self.result(),
                                              self.sequence.result(),
                                              self.cpp_attribute_op))
        else:
            # (while it'd be nice to limit the scope of the loop temp, it's essentially
            # impossible to do while supporting generators)
            temp_type = sequence_type
            if temp_type.is_reference:
                # 2) Sequence is a reference (often obtained by dereferencing a pointer);
                #    make the temp a pointer so we are not sensitive to users reassigning
                #    the pointer that it came from
                temp_type = PyrexTypes.CPtrType(sequence_type.ref_base_type)
            if temp_type.is_ptr:
                self.cpp_attribute_op = "->"
            # 3) (otherwise) sequence comes from a function call or similar, so we must
            #    create a temp to store it in
            self.cpp_sequence_cname = code.funcstate.allocate_temp(temp_type, manage_ref=temp_type.is_cyp_class)
            code.putln("%s = %s%s;" % (self.cpp_sequence_cname,
                                       "&" if temp_type.is_ptr else "",
                                       self.sequence.move_result_rhs()))
            if sequence_type.is_cyp_class:
                # The temp owns a reference to the cypclass container.
                code.put_incref(self.cpp_sequence_cname, sequence_type)
            code.putln("%s = %s%sbegin();" % (self.result(), self.cpp_sequence_cname,
                                              self.cpp_attribute_op))
def generate_iter_next_result_code(self, result_name, code):
# end call isn't cached to support containers that allow adding while iterating
# (much as this is usually a bad idea)
code.putln("if (!(%s != %s%send())) break;" % (
self.result(),
self.cpp_sequence_cname or self.sequence.result(),
self.cpp_attribute_op))
code.putln("%s = *%s;" % (
result_name,
self.result()))
code.putln("++%s;" % self.result())
def generate_subexpr_disposal_code(self, code):
if not self.cpp_sequence_cname:
# the sequence is accessed directly so any temporary result in its
# subexpressions must remain available until the iterator is not needed
return
ExprNode.generate_subexpr_disposal_code(self, code)
def free_subexpr_temps(self, code):
if not self.cpp_sequence_cname:
# the sequence is accessed directly so any temporary result in its
# subexpressions must remain available until the iterator is not needed
return
ExprNode.free_subexpr_temps(self, code)
    def generate_disposal_code(self, code):
        # clean-up the iterator by assigning end to it
        # this is only required if the iterator holds resources that must
        # be released once iteration is complete
        code.putln("%s = %s%send();" % (
            self.result(),
            self.cpp_sequence_cname or self.sequence.result(),
            self.cpp_attribute_op))
        if self.cpp_sequence_cname and self.sequence.type.is_cyp_class:
            # Drop the reference that the container temp was holding.
            code.put_decref(self.cpp_sequence_cname, self.sequence.type)
        if not self.cpp_sequence_cname:
            # postponed from CppIteratorNode.generate_subexpr_disposal_code
            # and CppIteratorNode.free_subexpr_temps
            ExprNode.generate_subexpr_disposal_code(self, code)
            ExprNode.free_subexpr_temps(self, code)
            # Adapt relevant bits from ExprNode.generate_disposal_code
            if self.result() and self.type.is_cyp_class:
                code.put_xdecref_clear(self.result(), self.ctype(), have_gil=not self.in_nogil_context)
            if self.has_temp_moved:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
        else:
            ExprNode.generate_disposal_code(self, code)
def free_temps(self, code):
if self.cpp_sequence_cname:
code.funcstate.release_temp(self.cpp_sequence_cname)
ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
    # Part of the 'for' statement implementation: computes
    # result = next(iterator).  Created during the analyse_types phase;
    # the iterator node is shared with the loop and not owned here.
    #
    #  iterator   IteratorNode
    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator
    def nogil_check(self, env):
        # Errors (if any) were already reported by the IteratorNode.
        pass
    def type_dependencies(self, env):
        return self.iterator.type_dependencies(env)
    def infer_type(self, env, iterator_type=None):
        # Infer the item type produced by one iteration step.
        it_type = iterator_type
        if it_type is None:
            it_type = self.iterator.infer_type(env)
        if it_type.is_ptr or it_type.is_array:
            return it_type.base_type
        if it_type.is_cpp_class:
            # The item type is what operator*() yields, stripped of
            # reference and cv qualifiers.
            deref_op = env.lookup_operator_for_types(self.pos, "*", [it_type])
            item_type = deref_op.type.return_type
            if item_type.is_reference:
                item_type = item_type.ref_base_type
            if item_type.is_cv_qualified:
                item_type = item_type.cv_base_type
            return item_type
        # Python iteration: delegate to IndexNode to avoid duplicating
        # its fairly involved item type inference.
        dummy_index = IntNode(self.pos, value='PY_SSIZE_T_MAX',
                              type=PyrexTypes.c_py_ssize_t_type)
        return IndexNode(
            self.pos, base=self.iterator.sequence, index=dummy_index).infer_type(env)
    def analyse_types(self, env):
        self.type = self.infer_type(env, self.iterator.type)
        self.is_temp = 1
        return self
    def generate_result_code(self, code):
        self.iterator.generate_iter_next_result_code(self.result(), code)
class AsyncIteratorNode(ExprNode):
    # Part of the 'async for' statement implementation: computes
    # result = sequence.__aiter__()
    #
    #  sequence   ExprNode
    subexprs = ['sequence']
    is_async = True
    type = py_object_type
    is_temp = 1
    def infer_type(self, env):
        # __aiter__() always yields a Python object.
        return py_object_type
    def analyse_types(self, env):
        self.sequence = self.sequence.analyse_types(env)
        if not self.sequence.type.is_pyobject:
            error(self.pos, "async for loops not allowed on C/C++ types")
        self.sequence = self.sequence.coerce_to_pyobject(env)
        return self
    def generate_result_code(self, code):
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
        result = self.result()
        code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % (
            result,
            self.sequence.py_result(),
            code.error_goto_if_null(result, self.pos)))
        self.generate_gotref(code)
class AsyncNextNode(AtomicExprNode):
    # Part of the 'async for' statement implementation: computes
    # result = iterator.__anext__().  Created during the analyse_types
    # phase; the iterator is not owned by this node.
    #
    #  iterator   IteratorNode
    type = py_object_type
    is_temp = 1
    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator
    def infer_type(self, env):
        # __anext__() always yields a Python object.
        return py_object_type
    def analyse_types(self, env):
        return self
    def generate_result_code(self, code):
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
        result = self.result()
        code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % (
            result,
            self.iterator.py_result(),
            code.error_goto_if_null(result, self.pos)))
        self.generate_gotref(code)
class WithExitCallNode(ExprNode):
    # The __exit__() call of a 'with' statement.  Used in both the
    # except and finally clauses.
    #  with_stat   WithStatNode                 the surrounding 'with' statement
    #  args        TupleNode or ResultStatNode  the exception info tuple
    #  await_expr  AwaitExprNode                the await expression of an 'async with' statement
    subexprs = ['args', 'await_expr']
    # guard the call with "if (exit_var) {...}" so it runs at most once
    test_if_run = True
    await_expr = None
    def analyse_types(self, env):
        self.args = self.args.analyse_types(env)
        if self.await_expr:
            self.await_expr = self.await_expr.analyse_types(env)
        # The result is the truth value of what __exit__() returned.
        self.type = PyrexTypes.c_bint_type
        self.is_temp = True
        return self
    def generate_evaluation_code(self, code):
        # Calls exit_var(*args), optionally awaits the result for
        # 'async with', then stores its truth value in the result temp.
        if self.test_if_run:
            # call only if it was not already called (and decref-cleared)
            code.putln("if (%s) {" % self.with_stat.exit_var)
        self.args.generate_evaluation_code(code)
        result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.mark_pos(self.pos)
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
            result_var,
            self.with_stat.exit_var,
            self.args.result()))
        # Clear exit_var immediately so the test_if_run guard is one-shot.
        code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
        self.args.generate_disposal_code(code)
        self.args.free_temps(code)
        code.putln(code.error_goto_if_null(result_var, self.pos))
        code.put_gotref(result_var, py_object_type)
        if self.await_expr:
            # FIXME: result_var temp currently leaks into the closure
            self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
            code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
            self.await_expr.generate_post_assignment_code(code)
            self.await_expr.free_temps(code)
        if self.result_is_used:
            self.allocate_temp_result(code)
        code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
        code.put_decref_clear(result_var, type=py_object_type)
        if self.result_is_used:
            code.put_error_if_neg(self.pos, self.result())
        code.funcstate.release_temp(result_var)
        if self.test_if_run:
            code.putln("}")
class ExcValueNode(AtomicExprNode):
    # Created during the analyse_types phase of an ExceptClauseNode to
    # fetch the current exception value from a pre-assigned variable.
    type = py_object_type
    def __init__(self, pos):
        ExprNode.__init__(self, pos)
    def set_var(self, var):
        # 'var' is the C name holding the exception value.
        self.var = var
    def calculate_result_code(self):
        return self.var
    def generate_result_code(self, code):
        # Nothing to emit: the except clause fills in the variable.
        pass
    def analyse_types(self, env):
        return self
class TempNode(ExprNode):
    # Node created during analyse_types phase
    # of some nodes to hold a temporary value.
    #
    # Note: One must call "allocate" and "release" on
    # the node during code generation to get/release the temp.
    # This is because the temp result is often used outside of
    # the regular cycle.
    subexprs = []
    def __init__(self, pos, type, env=None):
        ExprNode.__init__(self, pos)
        self.type = type
        if type.is_pyobject:
            self.result_ctype = py_object_type
        self.is_temp = 1
    def analyse_types(self, env):
        return self
    def analyse_target_declaration(self, env):
        pass
    def generate_result_code(self, code):
        pass
    def allocate(self, code):
        self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
    def release(self, code):
        code.funcstate.release_temp(self.temp_cname)
        self.temp_cname = None
    def result(self):
        try:
            return self.temp_cname
        except AttributeError:
            # A missing attribute means allocate() was never called.
            # Catch AttributeError specifically (a bare "except:" would
            # also swallow unrelated errors such as KeyboardInterrupt)
            # and keep the raise so the error survives "python -O",
            # which strips the assert.
            assert False, "Remember to call allocate/release on TempNode"
            raise
    # Do not participate in normal temp alloc/dealloc:
    def allocate_temp_result(self, code):
        pass
    def release_temp_result(self, code):
        pass
class PyTempNode(TempNode):
    # TempNode specialised for holding a Python object value.
    def __init__(self, pos, env):
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
    # Expression backed directly by a raw C name; generates no code of
    # its own, the result is simply the stored cname.
    subexprs = []
    def __init__(self, pos, type=None, cname=None):
        ExprNode.__init__(self, pos, type=type)
        if cname is not None:
            self.cname = cname
    def analyse_types(self, env):
        return self
    def set_cname(self, cname):
        self.cname = cname
    def result(self):
        return self.cname
    def generate_result_code(self, code):
        # The name already exists in the generated C code.
        pass
#-------------------------------------------------------------------
#
# F-strings
#
#-------------------------------------------------------------------
class JoinedStrNode(ExprNode):
    # F-strings
    #
    #  values   [UnicodeNode|FormattedValueNode]   Substrings of the f-string
    #
    type = unicode_type
    is_temp = True
    gil_message = "String concatenation"
    subexprs = ['values']
    def analyse_types(self, env):
        self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values]
        return self
    def may_be_none(self):
        # PyUnicode_Join() always returns a Unicode string or raises an exception
        return False
    def generate_evaluation_code(self, code):
        # Collect the substrings into a tuple while accumulating the
        # total length and the maximum character value (computed at
        # compile time for literals where possible), then join them in
        # a single __Pyx_PyUnicode_Join() call.
        code.mark_pos(self.pos)
        num_items = len(self.values)
        list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
        max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False)
        code.putln('%s = PyTuple_New(%s); %s' % (
            list_var,
            num_items,
            code.error_goto_if_null(list_var, self.pos)))
        code.put_gotref(list_var, py_object_type)
        code.putln("%s = 0;" % ulength_var)
        code.putln("%s = 127;" % max_char_var)  # at least ASCII character range
        for i, node in enumerate(self.values):
            node.generate_evaluation_code(code)
            node.make_owned_reference(code)
            ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result()
            max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result()
            is_ascii = False
            if isinstance(node, UnicodeNode):
                try:
                    # most strings will be ASCII or at least Latin-1
                    node.value.encode('iso8859-1')
                    max_char_value = '255'
                    node.value.encode('us-ascii')
                    is_ascii = True
                except UnicodeEncodeError:
                    if max_char_value != '255':
                        # not ISO8859-1 => check BMP limit
                        max_char = max(map(ord, node.value))
                        if max_char < 0xD800:
                            # BMP-only, no surrogate pairs used
                            max_char_value = '65535'
                            ulength = str(len(node.value))
                        elif max_char >= 65536:
                            # clearly outside of BMP, and not on a 16-bit Unicode system
                            max_char_value = '1114111'
                            ulength = str(len(node.value))
                        else:
                            # not really worth implementing a check for surrogate pairs here
                            # drawback: C code can differ when generating on Py2 with 2-byte Unicode
                            pass
                else:
                    # Latin-1 literal: length and max char are known statically.
                    ulength = str(len(node.value))
            elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
                is_ascii = True  # formatted C numbers are always ASCII
            if not is_ascii:
                code.putln("%s = (%s > %s) ? %s : %s;" % (
                    max_char_var, max_char_value, max_char_var, max_char_value, max_char_var))
            code.putln("%s += %s;" % (ulength_var, ulength))
            node.generate_giveref(code)
            code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result()))
            node.generate_post_assignment_code(code)
            node.free_temps(code)
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c"))
        code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % (
            self.result(),
            list_var,
            num_items,
            ulength_var,
            max_char_var,
            code.error_goto_if_null(self.py_result(), self.pos)))
        self.generate_gotref(code)
        code.put_decref_clear(list_var, py_object_type)
        code.funcstate.release_temp(list_var)
        code.funcstate.release_temp(ulength_var)
        code.funcstate.release_temp(max_char_var)
class FormattedValueNode(ExprNode):
    # {}-delimited portions of an f-string
    #
    #  value           ExprNode               The expression itself
    #  conversion_char str or None            Type conversion (!s, !r, !a, or none, or 'd' for integer conversion)
    #  format_spec     JoinedStrNode or None  Format string passed to __format__
    #  c_format_spec   str or None            If not None, formatting can be done at the C level
    subexprs = ['value', 'format_spec']
    type = unicode_type
    is_temp = True
    c_format_spec = None
    gil_message = "String formatting"
    # maps a conversion character to the C helper that implements it
    find_conversion_func = {
        's': 'PyObject_Unicode',
        'r': 'PyObject_Repr',
        'a': 'PyObject_ASCII',  # NOTE: mapped to PyObject_Repr() in Py2
        'd': '__Pyx_PyNumber_IntOrLong',  # NOTE: internal mapping for '%d' formatting
    }.get
    def may_be_none(self):
        # PyObject_Format() always returns a Unicode string or raises an exception
        return False
    def analyse_types(self, env):
        self.value = self.value.analyse_types(env)
        # Try to keep formatting at the C level for literal format specs.
        if not self.format_spec or self.format_spec.is_string_literal:
            c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec
            if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec):
                self.c_format_spec = c_format_spec
        if self.format_spec:
            self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env)
        if self.c_format_spec is None:
            self.value = self.value.coerce_to_pyobject(env)
            if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
                if self.value.type is unicode_type and not self.value.may_be_none():
                    # value is definitely a unicode string and we don't format it any special
                    return self.value
        return self
    def generate_result_code(self, code):
        if self.c_format_spec is not None and not self.value.type.is_pyobject:
            # Pure C-level conversion of a C value to a Python string.
            convert_func_call = self.value.type.convert_to_pystring(
                self.value.result(), code, self.c_format_spec)
            code.putln("%s = %s; %s" % (
                self.result(),
                convert_func_call,
                code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)
            return
        value_result = self.value.py_result()
        value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none()
        if self.format_spec:
            format_func = '__Pyx_PyObject_Format'
            format_spec = self.format_spec.py_result()
        else:
            # common case: expect simple Unicode pass-through if no format spec
            format_func = '__Pyx_PyObject_FormatSimple'
            # passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string
            format_spec = Naming.empty_unicode
        conversion_char = self.conversion_char
        if conversion_char == 's' and value_is_unicode:
            # no need to pipe unicode strings through str()
            conversion_char = None
        if conversion_char:
            fn = self.find_conversion_func(conversion_char)
            assert fn is not None, "invalid conversion character found: '%s'" % conversion_char
            # Wrap the value in the conversion call; the AndDecref helper
            # releases the intermediate object it produces.
            value_result = '%s(%s)' % (fn, value_result)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c"))
            format_func += 'AndDecref'
        elif self.format_spec:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectFormat", "StringTools.c"))
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c"))
        code.putln("%s = %s(%s, %s); %s" % (
            self.result(),
            format_func,
            value_result,
            format_spec,
            code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
    """
    Note: this is disabled and not a valid directive at this moment
    Implements cython.parallel.threadsavailable(). If we are called from the
    sequential part of the application, we need to call omp_get_max_threads(),
    and in the parallel part we can just call omp_get_num_threads()
    """
    type = PyrexTypes.c_int_type
    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self
    def generate_result_code(self, code):
        # NOTE(review): the branches below look inverted with respect to
        # the docstring above — omp_get_max_threads() is emitted for the
        # in-parallel case and omp_get_num_threads() for the sequential
        # case.  Left untouched since this node is currently disabled;
        # verify before re-enabling.
        code.putln("#ifdef _OPENMP")
        code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
                   self.temp_code)
        code.putln("else %s = omp_get_num_threads();" % self.temp_code)
        code.putln("#else")
        # Without OpenMP there is only ever one thread.
        code.putln("%s = 1;" % self.temp_code)
        code.putln("#endif")
    def result(self):
        return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
    """
    Implements cython.parallel.threadid()
    """
    type = PyrexTypes.c_int_type
    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self
    def generate_result_code(self, code):
        # Without OpenMP there is only the main thread, whose id is 0.
        temp = self.temp_code
        code.putln("#ifdef _OPENMP")
        code.putln("%s = omp_get_thread_num();" % temp)
        code.putln("#else")
        code.putln("%s = 0;" % temp)
        code.putln("#endif")
    def result(self):
        return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class _IndexingBaseNode(ExprNode):
    # Common base class for indexing nodes.
    #
    #  base   ExprNode   the value being indexed
    def is_ephemeral(self):
        # Indexing normally returns a safe reference into the container,
        # so the result is only as ephemeral as the base itself — except
        # for string-like bases, whose items are freshly created objects.
        if self.base.is_ephemeral():
            return True
        return self.base.type in (
            basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
    def check_const_addr(self):
        return self.base.check_const_addr() and self.index.check_const()
    def is_lvalue(self):
        # NOTE: references currently have both is_reference and is_ptr
        # set.  Since pointers and references have different lvalue
        # rules, references must be handled first.
        if self.type.is_reference:
            # references to fixed-size arrays aren't l-values
            return not self.type.ref_base_type.is_array
        # Pointers can always be reassigned, and just about everything
        # else returned by the index operator can be an lvalue too.
        return True
class IndexNode(_IndexingBaseNode):
    # Sequence indexing ("base[index]").
    #
    #  base   ExprNode
    #  index  ExprNode
    #  type_indices  [PyrexType]
    #
    #  is_fused_index  boolean   Whether the index is used to specialize a
    #                            c(p)def function
    subexprs = ['base', 'index']
    type_indices = None
    is_subscript = True
    is_fused_index = False
def calculate_constant_result(self):
self.constant_result = self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception as e:
self.compile_time_value_error(e)
def is_simple(self):
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
    def analyse_target_declaration(self, env):
        # Indexed assignment targets need no separate declaration.
        pass
    def analyse_as_type(self, env):
        # Try to interpret "base[index]" as a type: C++ template
        # instantiation, memoryview spec, or C array declaration.
        # Returns the resolved type, or None if this is not a type.
        base_type = self.base.analyse_as_type(env)
        if base_type and not base_type.is_pyobject:
            if base_type.is_cpp_class:
                # C++ template: cls[T1, T2, ...] instantiates the template.
                if isinstance(self.index, TupleNode):
                    template_values = self.index.args
                else:
                    template_values = [self.index]
                type_node = Nodes.TemplatedTypeNode(
                    pos=self.pos,
                    positional_args=template_values,
                    keyword_args=None)
                return type_node.analyse(env, base_type=base_type)
            elif self.index.is_slice or self.index.is_sequence_constructor:
                # memory view
                from . import MemoryView
                env.use_utility_code(MemoryView.view_utility_code)
                axes = [self.index] if self.index.is_slice else list(self.index.args)
                return PyrexTypes.MemoryViewSliceType(base_type, MemoryView.get_axes_specs(env, axes))
            else:
                # C array
                index = self.index.compile_time_value(env)
                if index is not None:
                    try:
                        index = int(index)
                    except (ValueError, TypeError):
                        # non-integer constant: fall through to the error
                        pass
                    else:
                        return PyrexTypes.CArrayType(base_type, index)
                error(self.pos, "Array size must be a compile time constant")
        return None
    def type_dependencies(self, env):
        # The inferred type depends on both operand types.
        return self.base.type_dependencies(env) + self.index.type_dependencies(env)
    def infer_type(self, env):
        # Infer the result type of "base[index]" without doing a full
        # type analysis of the node.
        base_type = self.base.infer_type(env)
        if self.index.is_slice:
            # slicing!
            if base_type.is_string:
                # sliced C strings must coerce to Python
                return bytes_type
            elif base_type.is_pyunicode_ptr:
                # sliced Py_UNICODE* strings must coerce to Python
                return unicode_type
            elif base_type in (unicode_type, bytes_type, str_type,
                               bytearray_type, list_type, tuple_type):
                # slicing these returns the same type
                return base_type
            elif base_type.is_memoryviewslice:
                return base_type
            else:
                # TODO: Handle buffers (hopefully without too much redundancy).
                return py_object_type
        index_type = self.index.infer_type(env)
        if index_type and index_type.is_int or isinstance(self.index, IntNode):
            # indexing!
            if base_type is unicode_type:
                # Py_UCS4 will automatically coerce to a unicode string
                # if required, so this is safe. We only infer Py_UCS4
                # when the index is a C integer type. Otherwise, we may
                # need to use normal Python item access, in which case
                # it's faster to return the one-char unicode string than
                # to receive it, throw it away, and potentially rebuild it
                # on a subsequent PyObject coercion.
                return PyrexTypes.c_py_ucs4_type
            elif base_type is str_type:
                # always returns str - Py2: bytes, Py3: unicode
                return base_type
            elif base_type is bytearray_type:
                return PyrexTypes.c_uchar_type
            elif isinstance(self.base, BytesNode):
                #if env.global_scope().context.language_level >= 3:
                #    # inferring 'char' can be made to work in Python 3 mode
                #    return PyrexTypes.c_char_type
                # Py2/3 return different types on indexing bytes objects
                return py_object_type
            elif base_type in (tuple_type, list_type):
                # if base is a literal, take a look at its values
                item_type = infer_sequence_item_type(
                    env, self.base, self.index, seq_type=base_type)
                if item_type is not None:
                    return item_type
            elif base_type.is_ptr or base_type.is_array:
                return base_type.base_type
            elif base_type.is_ctuple and isinstance(self.index, IntNode):
                # Constant index into a ctuple: pick the component's type.
                if self.index.has_constant_result():
                    index = self.index.constant_result
                    if index < 0:
                        index += base_type.size
                    if 0 <= index < base_type.size:
                        return base_type.components[index]
            elif base_type.is_memoryviewslice:
                if base_type.ndim == 0:
                    pass  # probably an error, but definitely don't know what to do - return pyobject for now
                if base_type.ndim == 1:
                    return base_type.dtype
                else:
                    # indexing strips the first dimension
                    return PyrexTypes.MemoryViewSliceType(base_type.dtype, base_type.axes[1:])
        if self.index.is_sequence_constructor and base_type.is_memoryviewslice:
            # Multi-dimensional index: infer dimension by dimension.
            inferred_type = base_type
            for a in self.index.args:
                if not inferred_type.is_memoryviewslice:
                    break  # something's gone wrong
                inferred_type = IndexNode(self.pos, base=ExprNode(self.base.pos, type=inferred_type),
                                          index=a).infer_type(env)
            else:
                return inferred_type
        if base_type.is_cpp_class:
            # Look up operator[] with fake operand wrappers carrying
            # just the attributes the lookup needs.
            class FakeOperand:
                def __init__(self, **kwds):
                    self.__dict__.update(kwds)
            operands = [
                FakeOperand(pos=self.pos, type=base_type),
                FakeOperand(pos=self.pos, type=index_type),
            ]
            index_func = env.lookup_operator('[]', operands)
            if index_func is not None:
                return index_func.type.return_type
        if is_pythran_expr(base_type) and is_pythran_expr(index_type):
            index_with_type = (self.index, index_type)
            return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
        # may be slicing or indexing, we don't know
        if base_type in (unicode_type, str_type):
            # these types always returns their own type on Python indexing/slicing
            return base_type
        else:
            # TODO: Handle buffers (hopefully without too much redundancy).
            return py_object_type
def analyse_types(self, env):
index_node = self.analyse_base_and_index_types(env, getting=True)
if index_node.type.is_cyp_class:
# If a[b] is a cypclass, a new reference is returned.
# In that case it must be stored for later decref-ing.
# XXX: future optimisation: only coerce to temporary
# when the value is not already assigned to a variable.
return index_node.coerce_to_temp(env)
return index_node
    def analyse_target_types(self, env):
        # Analyse this node as the target of an assignment.
        node = self.analyse_base_and_index_types(env, setting=True)
        if node.type.is_const and not node.base.type.is_cyp_class:
            error(self.pos, "Assignment to const dereference")
        if node is self and not node.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
        return node
    def analyse_del_expression(self, env):
        # Analyse this node as the target of a 'del' statement.
        # Note: deletion is analysed with setting=True as well.
        node = self.analyse_base_and_index_types(env, setting=True, deleting=True)
        if node is self and not node.is_lvalue():
            error(self.pos, "Deleting non-lvalue of type '%s'" % node.type)
        return node
    def analyse_base_and_index_types(self, env, getting=False, setting=False, deleting=False,
                                     analyse_base=True):
        # Analyse base and index, then dispatch to the base-type-specific
        # analysis method.  May return a replacement node.
        # Note: This might be cleaned up by having IndexNode
        # parsed in a saner way and only construct the tuple if
        # needed.
        if analyse_base:
            self.base = self.base.analyse_types(env)
        if self.base.type.is_error:
            # Do not visit child tree if base is undeclared to avoid confusing
            # error messages
            self.type = PyrexTypes.error_type
            return self
        is_slice = self.index.is_slice
        if not env.directives['wraparound']:
            if is_slice:
                check_negative_indices(self.index.start, self.index.stop)
            else:
                check_negative_indices(self.index)
        # Potentially overflowing index value.
        if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
            self.index = self.index.coerce_to_pyobject(env)
        is_memslice = self.base.type.is_memoryviewslice
        # Handle the case where base is a literal char* (and we expect a string, not an int)
        if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
            if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
                self.base = self.base.coerce_to_pyobject(env)
        replacement_node = self.analyse_as_buffer_operation(env, getting)
        if replacement_node is not None:
            return replacement_node
        self.nogil = env.nogil
        base_type = self.base.type
        if not base_type.is_cfunction:
            self.index = self.index.analyse_types(env)
            self.original_index_type = self.index.type
            if self.original_index_type.is_reference:
                self.original_index_type = self.original_index_type.ref_base_type
        if base_type.is_unicode_char:
            # we infer Py_UNICODE/Py_UCS4 for unicode strings in some
            # cases, but indexing must still work for them
            if setting:
                warning(self.pos, "cannot assign to Unicode string index", level=1)
            elif self.index.constant_result in (0, -1):
                # uchar[0] => uchar
                return self.base
            self.base = self.base.coerce_to_pyobject(env)
            base_type = self.base.type
        # Dispatch on the (possibly coerced) base type.
        if base_type.is_pyobject:
            return self.analyse_as_pyobject(env, is_slice, getting, setting)
        elif base_type.is_ptr or base_type.is_array:
            return self.analyse_as_c_array(env, is_slice)
        elif base_type.is_cyp_class:
            return self.analyse_as_cyp_class(env, setting, deleting)
        elif base_type.is_cpp_class:
            return self.analyse_as_cpp(env, setting)
        elif base_type.is_cfunction:
            return self.analyse_as_c_function(env)
        elif base_type.is_ctuple:
            return self.analyse_as_c_tuple(env, getting, setting)
        else:
            error(self.pos,
                  "Attempting to index non-array type '%s'" %
                  base_type)
            self.type = PyrexTypes.error_type
            return self
    def analyse_as_pyobject(self, env, is_slice, getting, setting):
        # Analyse indexing of a Python object base; decides index
        # coercion, temp usage and the result type.
        base_type = self.base.type
        if self.index.type.is_unicode_char and base_type is not dict_type:
            # TODO: eventually fold into case below and remove warning, once people have adapted their code
            warning(self.pos,
                    "Item lookup of unicode character codes now always converts to a Unicode string. "
                    "Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1)
            self.index = self.index.coerce_to_pyobject(env)
            self.is_temp = 1
        elif self.index.type.is_int and base_type is not dict_type:
            # Fast C integer indexing can avoid a temp when bounds and
            # wraparound checking cannot trigger.
            if (getting
                    and (base_type in (list_type, tuple_type, bytearray_type))
                    and (not self.index.type.signed
                         or not env.directives['wraparound']
                         or (isinstance(self.index, IntNode) and
                             self.index.has_constant_result() and self.index.constant_result >= 0))
                    and not env.directives['boundscheck']):
                self.is_temp = 0
            else:
                self.is_temp = 1
            self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
            self.original_index_type.create_to_py_utility_code(env)
        else:
            self.index = self.index.coerce_to_pyobject(env)
            self.is_temp = 1
        # Determine the result type.
        if self.index.type.is_int and base_type is unicode_type:
            # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
            # if required, so this is fast and safe
            self.type = PyrexTypes.c_py_ucs4_type
        elif self.index.type.is_int and base_type is bytearray_type:
            if setting:
                self.type = PyrexTypes.c_uchar_type
            else:
                # not using 'uchar' to enable fast and safe error reporting as '-1'
                self.type = PyrexTypes.c_int_type
        elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
            self.type = base_type
        else:
            item_type = None
            if base_type in (list_type, tuple_type) and self.index.type.is_int:
                item_type = infer_sequence_item_type(
                    env, self.base, self.index, seq_type=base_type)
            if item_type is None:
                item_type = py_object_type
            self.type = item_type
            if base_type in (list_type, tuple_type, dict_type):
                # do the None check explicitly (not in a helper) to allow optimising it away
                self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
        self.wrap_in_nonecheck_node(env, getting)
        return self
    def analyse_as_c_array(self, env, is_slice):
        # Analyse pointer/array indexing or slicing.
        base_type = self.base.type
        self.type = base_type.base_type
        if self.type.is_cpp_class and not self.type.is_cyp_class:
            # Element access on a C++ class array yields a reference.
            self.type = PyrexTypes.CReferenceType(self.type)
        if is_slice:
            # Slicing a C array re-uses the array type itself.
            self.type = base_type
        elif self.index.type.is_pyobject:
            self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
        elif not self.index.type.is_int:
            error(self.pos, "Invalid index type '%s'" % self.index.type)
        return self
def cpp_error(self, msg):
error(self.pos, msg)
self.type = PyrexTypes.error_type
self.result_code = ""
return self
    def analyse_as_cpp(self, env, setting):
        # Analyse indexing of a C++ class via its operator[].
        base_type = self.base.type
        function = env.lookup_operator("[]", [self.base, self.index])
        if function is None:
            return self.cpp_error("Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
        func_type = function.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        # Propagate the operator's exception declaration.
        self.exception_check = func_type.exception_check
        self.exception_value = func_type.exception_value
        if self.exception_check == '+':
            if not setting:
                self.is_temp = True
            if self.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        elif self.exception_check == '~':
            self.is_temp = True
        self.index = self.index.coerce_to(func_type.args[0].type, env)
        self.type = func_type.return_type.as_returned_type()
        if setting and not self.type.is_reference:
            error(self.pos, "Can't set non-reference result '%s'" % self.type)
        return self
def analyse_as_cyp_class(self, env, setting, deleting):
    """Dispatch cypclass subscript analysis to __getitem__/__setitem__/__delitem__."""
    if not setting:
        return self.analyse_cyp_class_getitem(env)
    if deleting:
        return self.analyse_cyp_class_delitem(env)
    return self.analyse_cyp_class_setitem(env)
def analyse_cyp_class_getitem(self, env):
    """Analyse a subscript read on a cypclass via its __getitem__ method.

    Requires exactly one (non-overloaded) signature taking a single
    argument.  Records the method's exception handling mode and adopts
    its return type as this node's type.
    """
    base_type = self.base.type
    function = base_type.scope.lookup("__getitem__")
    if function is None:
        return self.cpp_error("Cypclass %s does not support getting subscript item" % base_type)
    elif len(function.all_alternatives()) != 1:
        return self.cpp_error("%s.__getitem__ must have only one signature" % base_type)
    func_type = function.type
    if len(func_type.args) != 1:
        return self.cpp_error("%s.__getitem__ must take one argument" % base_type)
    self.exception_check = func_type.exception_check
    self.exception_value = func_type.exception_value
    if self.exception_check == '+':
        self.is_temp = True
        if self.exception_value is None:
            env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
    elif self.exception_check == '~':
        self.is_temp = True
    self.index = self.index.coerce_to(function.type.args[0].type, env)
    self.type = func_type.return_type.as_returned_type()
    return self
def analyse_cyp_class_setitem(self, env):
    """Analyse a subscript assignment on a cypclass via its __setitem__ method.

    Requires exactly one signature taking two arguments (index, value).
    This node's type becomes the type of the value argument.
    """
    base_type = self.base.type
    function = base_type.scope.lookup("__setitem__")
    if function is None:
        return self.cpp_error("Cypclass %s does not support setting subscript item" % base_type)
    elif len(function.all_alternatives()) != 1:
        return self.cpp_error("Cypclass method %s.__setitem__ must have only one signature" % base_type)
    func_type = function.type
    if len(func_type.args) != 2:
        return self.cpp_error("Cypclass method %s.__setitem__ must take two arguments" % base_type)
    self.exception_check = func_type.exception_check
    self.exception_value = func_type.exception_value
    if self.exception_check == '+' and self.exception_value is None:
        env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
    self.index = self.index.coerce_to(function.type.args[0].type, env)
    self.type = func_type.args[1].type
    return self
def analyse_cyp_class_delitem(self, env):
    """Analyse a subscript deletion on a cypclass via its __delitem__ method.

    Requires exactly one signature taking a single (index) argument.
    This node's type becomes the method's return type.
    """
    base_type = self.base.type
    function = base_type.scope.lookup("__delitem__")
    if function is None:
        return self.cpp_error("Cypclass %s does not support deleting subscript item" % base_type)
    elif len(function.all_alternatives()) != 1:
        return self.cpp_error("Cypclass method %s.__delitem__ must have only one signature" % base_type)
    func_type = function.type
    if len(func_type.args) != 1:
        return self.cpp_error("Cypclass method %s.__delitem__ must take one argument" % base_type)
    self.exception_check = func_type.exception_check
    self.exception_value = func_type.exception_value
    if self.exception_check == '+' and self.exception_value is None:
        env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
    self.index = self.index.coerce_to(function.type.args[0].type, env)
    self.type = func_type.return_type.as_returned_type()
    return self
def analyse_as_c_function(self, env):
    """Analyse indexing of a C function.

    For fused functions this selects a specialization; for template
    functions the index expressions are parsed as types and the function
    type is specialized with them.
    """
    base_type = self.base.type
    if base_type.is_fused:
        self.parse_indexed_fused_cdef(env)
    else:
        self.type_indices = self.parse_index_as_types(env)
        self.index = None  # FIXME: use a dedicated Node class instead of generic IndexNode
        if base_type.templates is None:
            error(self.pos, "Can only parameterize template functions.")
            self.type = error_type
        elif self.type_indices is None:
            # Error recorded earlier.
            self.type = error_type
        elif len(base_type.templates) != len(self.type_indices):
            error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
                (len(base_type.templates), len(self.type_indices))))
            self.type = error_type
        else:
            self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
    # FIXME: use a dedicated Node class instead of generic IndexNode
    return self
def analyse_as_c_tuple(self, env, getting, setting):
    """Analyse indexing of a ctuple.

    A compile-time constant integer index resolves directly to the
    component type (with Python-style negative-index wraparound and a
    bounds check); any other index falls back to Python-object indexing.
    """
    base_type = self.base.type
    if isinstance(self.index, IntNode) and self.index.has_constant_result():
        index = self.index.constant_result
        if -base_type.size <= index < base_type.size:
            if index < 0:
                # Wrap negative indices around, as for Python tuples.
                index += base_type.size
            self.type = base_type.components[index]
        else:
            error(self.pos,
                  "Index %s out of bounds for '%s'" %
                  (index, base_type))
            self.type = PyrexTypes.error_type
        return self
    else:
        # Non-constant index: coerce to a Python tuple and index that.
        self.base = self.base.coerce_to_pyobject(env)
        return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
def analyse_as_buffer_operation(self, env, getting):
    """
    Analyse buffer indexing and memoryview indexing/slicing.

    Returns a replacement node (MemoryViewSliceNode, MemoryViewIndexNode
    or BufferIndexNode) when the access can be specialised, or None when
    no replacement applies.
    """
    if isinstance(self.index, TupleNode):
        indices = self.index.args
    else:
        indices = [self.index]
    base = self.base
    base_type = base.type
    replacement_node = None
    if base_type.is_memoryviewslice:
        # memoryviewslice indexing or slicing
        from . import MemoryView
        if base.is_memview_slice:
            # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
            merged_indices = base.merged_indices(indices)
            if merged_indices is not None:
                base = base.base
                base_type = base.type
                indices = merged_indices
        have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
        if have_slices:
            replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
        else:
            replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
    elif base_type.is_buffer or base_type.is_pythran_expr:
        if base_type.is_pythran_expr or len(indices) == base_type.ndim:
            # Buffer indexing
            is_buffer_access = True
            indices = [index.analyse_types(env) for index in indices]
            if base_type.is_pythran_expr:
                do_replacement = all(
                    index.type.is_int or index.is_slice or index.type.is_pythran_expr
                    for index in indices)
                if do_replacement:
                    # Turn slice nodes into Pythran-compatible integer slices.
                    for i,index in enumerate(indices):
                        if index.is_slice:
                            index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
                            index = index.analyse_types(env)
                            indices[i] = index
            else:
                do_replacement = all(index.type.is_int for index in indices)
            if do_replacement:
                replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
                # On cloning, indices is cloned. Otherwise, unpack index into indices.
                assert not isinstance(self.index, CloneNode)
    if replacement_node is not None:
        replacement_node = replacement_node.analyse_types(env, getting)
    return replacement_node
def wrap_in_nonecheck_node(self, env, getting):
    """If the 'nonecheck' directive is active and the base may be None,
    wrap the base in a None-safety check."""
    if env.directives['nonecheck'] and self.base.may_be_none():
        self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
def parse_index_as_types(self, env, required=True):
    """Interpret the index (or tuple of indices) as a list of types.

    Returns None as soon as an index is not parsable as a type; unless
    *required* is False, an error is reported for the offending index.
    """
    if isinstance(self.index, TupleNode):
        index_nodes = self.index.args
    else:
        index_nodes = [self.index]
    parsed_types = []
    for index_node in index_nodes:
        index_type = index_node.analyse_as_type(env)
        if index_type is None:
            if required:
                error(index_node.pos, "not parsable as a type")
            return None
        parsed_types.append(index_type)
    return parsed_types
def parse_indexed_fused_cdef(self, env):
    """
    Interpret fused_cdef_func[specific_type1, ...]

    Note that if this method is called, we are an indexed cdef function
    with fused argument types, and this IndexNode will be replaced by the
    NameNode with specific entry just after analysis of expressions by
    AnalyseExpressionsTransform.
    """
    self.type = PyrexTypes.error_type
    self.is_fused_index = True
    base_type = self.base.type
    positions = []
    # Remember source positions of the individual index expressions for
    # precise error reporting below.
    if self.index.is_name or self.index.is_attribute:
        positions.append(self.index.pos)
    elif isinstance(self.index, TupleNode):
        for arg in self.index.args:
            positions.append(arg.pos)
    specific_types = self.parse_index_as_types(env, required=False)
    if specific_types is None:
        # The indices are not types at all.
        self.index = self.index.analyse_types(env)
        if not self.base.entry.as_variable:
            error(self.pos, "Can only index fused functions with types")
        else:
            # A cpdef function indexed with Python objects
            self.base.entry = self.entry = self.base.entry.as_variable
            self.base.type = self.type = self.entry.type
            self.base.is_temp = True
            self.is_temp = True
            self.entry.used = True
        self.is_fused_index = False
        return
    for i, type in enumerate(specific_types):
        specific_types[i] = type.specialize_fused(env)
    fused_types = base_type.get_fused_types()
    if len(specific_types) > len(fused_types):
        return error(self.pos, "Too many types specified")
    elif len(specific_types) < len(fused_types):
        t = fused_types[len(specific_types)]
        return error(self.pos, "Not enough types specified to specialize "
                               "the function, %s is still fused" % t)
    # See if our index types form valid specializations
    for pos, specific_type, fused_type in zip(positions,
                                              specific_types,
                                              fused_types):
        if not any([specific_type.same_as(t) for t in fused_type.types]):
            return error(pos, "Type not in fused type")
        if specific_type is None or specific_type.is_error:
            return
    fused_to_specific = dict(zip(fused_types, specific_types))
    type = base_type.specialize(fused_to_specific)
    if type.is_fused:
        # Only partially specific, this is invalid
        error(self.pos,
              "Index operation makes function only partially specific")
    else:
        # Fully specific, find the signature with the specialized entry
        for signature in self.base.type.get_all_specialized_function_types():
            if type.same_as(signature):
                self.type = signature
                if self.base.is_attribute:
                    # Pretend to be a normal attribute, for cdef extension
                    # methods
                    self.entry = signature.entry
                    self.is_attribute = True
                    self.obj = self.base.obj
                self.type.entry.used = True
                self.base.type = signature
                self.base.entry = signature.entry
                break
        else:
            # This is a bug
            raise InternalError("Couldn't find the right signature")
def coerce_to_temp(self, env):
    """Coerce to a temp, skipping reference management for cypclass results
    that already come back as a new reference."""
    temp = ExprNode.coerce_to_temp(self, env)
    base_type = self.base.type
    if self.type.is_cyp_class and not (base_type.is_array or base_type.is_ptr):
        # The value was returned as a new reference,
        # either via cpp operator[]
        # or via cypclass __getitem__.
        temp.use_managed_ref = False
    return temp
def result_is_new_reference(self):
    """Return True when the result is a reference the caller owns."""
    if self.type.is_cyp_class and not (self.base.type.is_array or self.base.type.is_ptr):
        # This is already a new reference
        # either via cpp operator[]
        # or via cypclass __getitem__
        return True
    return ExprNode.result_is_new_reference(self)

# Error message used when this operation requires the GIL but it is not held.
gil_message = "Indexing Python object"
def calculate_result_code(self):
    """Return the C expression for a non-temp index result.

    Specialised macros are used for list/tuple/bytearray bases, template
    syntax for indexed C functions, struct member access for ctuples, and
    plain C subscripting otherwise (dereferenced first for cypclasses).
    """
    if self.base.type in (list_type, tuple_type, bytearray_type):
        if self.base.type is list_type:
            index_code = "PyList_GET_ITEM(%s, %s)"
        elif self.base.type is tuple_type:
            index_code = "PyTuple_GET_ITEM(%s, %s)"
        elif self.base.type is bytearray_type:
            index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
        else:
            assert False, "unexpected base type in indexing: %s" % self.base.type
    elif self.base.type.is_cfunction:
        # Indexed C function: emit C++ template instantiation syntax.
        template_strings = [
            "%s%s" % (
                param.empty_declaration_code(),
                "*" if param.is_cyp_class else ""
            )
            for param in self.type_indices
        ]
        return "%s<%s>" % (
            self.base.result(),
            ",".join(template_strings))
    elif self.base.type.is_ctuple:
        index = self.index.constant_result
        if index < 0:
            index += self.base.type.size
        return "%s.f%s" % (self.base.result(), index)
    else:
        if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
            error(self.pos, "Invalid use of pointer slice")
            return
        if self.base.type.is_cyp_class:
            # Dereference the cypclass pointer before subscripting.
            index_code = "((*%s)[%s])"
        else:
            index_code = "(%s[%s])"
    return index_code % (self.base.result(), self.index.result())
def extra_index_params(self, code):
    """Return extra C arguments for the __Pyx_{Get,Set,Del}ItemInt helpers.

    For integer indices this supplies the index type, its signedness, its
    to-Python conversion function and the is_list/wraparound/boundscheck
    flags; for object indices no extra arguments are needed.
    """
    if self.index.type.is_int:
        is_list = self.base.type is list_type
        # Wraparound handling can be skipped for provably non-negative
        # constant indices or unsigned index types.
        wraparound = (
            bool(code.globalstate.directives['wraparound']) and
            self.original_index_type.signed and
            not (isinstance(self.index.constant_result, _py_int_types)
                 and self.index.constant_result >= 0))
        boundscheck = bool(code.globalstate.directives['boundscheck'])
        return ", %s, %d, %s, %d, %d, %d" % (
            self.original_index_type.empty_declaration_code(),
            self.original_index_type.signed and 1 or 0,
            self.original_index_type.to_py_function,
            is_list, wraparound, boundscheck)
    else:
        return ""
def generate_result_code(self, code):
    """Generate the code computing the index result into a temp.

    Selects a specialised getitem helper by base/index/result type, then
    emits either a C++ subscript (with '+' or '~' exception handling) or
    a call to the chosen helper with an error check.
    """
    if not self.is_temp:
        # all handled in self.calculate_result_code()
        return

    utility_code = None
    if self.type.is_pyobject:
        error_value = 'NULL'
        if self.index.type.is_int:
            if self.base.type is list_type:
                function = "__Pyx_GetItemInt_List"
            elif self.base.type is tuple_type:
                function = "__Pyx_GetItemInt_Tuple"
            else:
                function = "__Pyx_GetItemInt"
            utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
        else:
            if self.base.type is dict_type:
                function = "__Pyx_PyDict_GetItem"
                utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
            elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
                # obj[str] is probably doing a dict lookup
                function = "__Pyx_PyObject_Dict_GetItem"
                utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
            else:
                function = "__Pyx_PyObject_GetItem"
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
                utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
    elif self.type.is_unicode_char and self.base.type is unicode_type:
        assert self.index.type.is_int
        function = "__Pyx_GetItemInt_Unicode"
        error_value = '(Py_UCS4)-1'
        utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
    elif self.base.type is bytearray_type:
        assert self.index.type.is_int
        assert self.type.is_int
        function = "__Pyx_GetItemInt_ByteArray"
        error_value = '-1'
        utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
    elif not (self.base.type.is_cpp_class and self.exception_check in ('+', '~')):
        assert False, "unexpected type %s and base type %s for indexing" % (
            self.type, self.base.type)

    if utility_code is not None:
        code.globalstate.use_utility_code(utility_code)

    if self.index.type.is_int:
        index_code = self.index.result()
    else:
        index_code = self.index.py_result()

    if self.base.type.is_cpp_class:
        base_result = self.base.result()
        if self.base.type.is_cyp_class:
            # Dereference the cypclass pointer before subscripting.
            base_result = "(*%s)" % base_result
        evaluation_code = "%s = %s[%s];" % (self.result(), base_result, self.index.result())
        if self.exception_check == '+':
            translate_cpp_exception(code, self.pos,
                                    evaluation_code,
                                    self.result() if self.type.is_pyobject else None,
                                    self.exception_value, self.in_nogil_context)
        elif self.exception_check == '~':
            # '~': the result value itself signals the error.
            exc_check_code = self.type.error_condition(self.result())
            goto_error = code.error_goto_if(exc_check_code, self.pos)
            code.putln("%s %s" % (evaluation_code, goto_error))
    else:
        error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value
        code.putln(
            "%s = %s(%s, %s%s); %s" % (
                self.result(),
                function,
                self.base.py_result(),
                index_code,
                self.extra_index_params(code),
                code.error_goto_if(error_check % self.result(), self.pos)))
    if self.type.is_pyobject:
        self.generate_gotref(code)
    elif self.type.is_cyp_class:
        code.put_gotref(self.result(), self.type)
def generate_setitem_code(self, value_code, code):
    """Generate code assigning *value_code* to this Python-level subscript.

    Uses specialised helpers for integer indices and bytearray bases,
    PyDict_SetItem for dicts, and PyObject_SetItem otherwise.
    """
    if self.index.type.is_int:
        if self.base.type is bytearray_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
            function = "__Pyx_SetItemInt_ByteArray"
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
            function = "__Pyx_SetItemInt"
        index_code = self.index.result()
    else:
        index_code = self.index.py_result()
        if self.base.type is dict_type:
            function = "PyDict_SetItem"
        # It would seem that we could specialized lists/tuples, but that
        # shouldn't happen here.
        # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
        # index instead of an object, and bad conversion here would give
        # the wrong exception. Also, tuples are supposed to be immutable,
        # and raise a TypeError when trying to set their entries
        # (PyTuple_SetItem() is for creating new tuples from scratch).
        else:
            function = "PyObject_SetItem"
    code.putln(code.error_goto_if_neg(
        "%s(%s, %s, %s%s)" % (
            function,
            self.base.py_result(),
            index_code,
            value_code,
            self.extra_index_params(code)),
        self.pos))
def generate_cyp_class_setitem_code(self, value_code, code):
    """Generate C++ code calling the cypclass __setitem__, with exception handling."""
    function = self.base.type.scope.lookup_here("__setitem__")
    function_code = function.cname
    setitem_code = "%s->%s(%s, %s);" % (
        self.base.result(),
        function_code,
        self.index.result(),
        value_code)
    if self.exception_check == "+":
        translate_cpp_exception(code, self.pos,
                                setitem_code,
                                None,
                                self.exception_value, self.in_nogil_context)
    elif self.exception_check == "~":
        # '~': capture the return value in an unmanaged temp and test the
        # return type's error condition.
        checked_result_type = function.type.return_type
        temp_code = code.funcstate.allocate_temp(checked_result_type, manage_ref=False)
        exc_check_code = checked_result_type.error_condition(temp_code)
        goto_error = code.error_goto_if(exc_check_code, self.pos)
        code.putln("%s = %s %s" % (temp_code, setitem_code, goto_error))
        code.funcstate.release_temp(temp_code)
    else:
        code.putln(setitem_code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                             exception_check=None, exception_value=None):
    """Generate code assigning *rhs* to this index expression.

    Dispatches on the base/result type: Python setitem, cypclass
    __setitem__, bytearray byte store (with range check), C++ operator[]
    with exception translation, or a plain C assignment.
    """
    self.generate_subexpr_evaluation_code(code)

    if self.type.is_pyobject:
        self.generate_setitem_code(rhs.py_result(), code)
    elif self.base.type.is_cyp_class:
        self.generate_cyp_class_setitem_code(rhs.result(), code)
    elif self.base.type is bytearray_type:
        value_code = self._check_byte_value(code, rhs)
        self.generate_setitem_code(value_code, code)
    elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+':
        if overloaded_assignment and exception_check and self.exception_value != exception_value:
            # Handle the case that both the index operator and the assignment
            # operator have a c++ exception handler and they are not the same.
            translate_double_cpp_exception(code, self.pos, self.type,
                                           self.result(), rhs.result(), self.exception_value,
                                           exception_value, self.in_nogil_context)
        else:
            # Handle the case that only the index operator has a
            # c++ exception handler, or that
            # both exception handlers are the same.
            translate_cpp_exception(code, self.pos,
                                    "%s = %s;" % (self.result(), rhs.result()),
                                    self.result() if self.type.is_pyobject else None,
                                    self.exception_value, self.in_nogil_context)
    else:
        code.putln(
            "%s = %s;" % (self.result(), rhs.result()))
        if (self.base.type.is_array or self.base.type.is_ptr) and self.type.is_cyp_class:
            # XXX This is a good place for refcounting optimisation:
            # if the rhs is a cypclass temporary we are doing an INCREF and a DECREF.
            code.put_incref(self.result(), self.type)

    self.generate_subexpr_disposal_code(code)
    self.free_subexpr_temps(code)
    rhs.generate_disposal_code(code)
    rhs.free_temps(code)
def _check_byte_value(self, code, rhs):
    """Return a value expression for storing *rhs* into a bytearray item.

    Emits a compile-time warning for constant values outside range(0, 256)
    and, when the GIL is held, a runtime range check raising ValueError;
    casts to unsigned char where needed to keep the C compiler quiet.
    """
    # TODO: should we do this generally on downcasts, or just here?
    assert rhs.type.is_int, repr(rhs.type)
    value_code = rhs.result()
    if rhs.has_constant_result():
        if 0 <= rhs.constant_result < 256:
            return value_code
        needs_cast = True  # make at least the C compiler happy
        warning(rhs.pos,
                "value outside of range(0, 256)"
                " when assigning to byte: %s" % rhs.constant_result,
                level=1)
    else:
        needs_cast = rhs.type != PyrexTypes.c_uchar_type
        if not self.nogil:
            conditions = []
            if rhs.is_literal or rhs.type.signed:
                conditions.append('%s < 0' % value_code)
            if (rhs.is_literal or not
                    (rhs.is_temp and rhs.type in (
                        PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
                        PyrexTypes.c_schar_type))):
                conditions.append('%s > 255' % value_code)
            if conditions:
                code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
                code.putln(
                    'PyErr_SetString(PyExc_ValueError,'
                    ' "byte must be in range(0, 256)"); %s' %
                    code.error_goto(self.pos))
                code.putln("}")

    if needs_cast:
        value_code = '((unsigned char)%s)' % value_code
    return value_code
def generate_cyp_class_delitem_code(self, code):
    """Generate C++ code calling the cypclass __delitem__, with exception handling."""
    entry = self.base.type.scope.lookup_here("__delitem__")
    delitem_code = "%s->%s(%s);" % (
        self.base.result(), entry.cname, self.index.result())
    if self.exception_check == "+":
        translate_cpp_exception(code, self.pos,
                                delitem_code,
                                None,
                                self.exception_value, self.in_nogil_context)
    elif self.exception_check == "~":
        # '~': capture the return value in an unmanaged temp and test the
        # type's error condition.
        result_temp = code.funcstate.allocate_temp(self.type, manage_ref=False)
        error_cond = self.type.error_condition(result_temp)
        code.putln("%s = %s %s" % (
            result_temp, delitem_code, code.error_goto_if(error_cond, self.pos)))
        code.funcstate.release_temp(result_temp)
    else:
        code.putln(delitem_code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
    """Generate code deleting this subscript.

    Cypclasses go through __delitem__; otherwise a specialised integer
    helper, PyDict_DelItem or PyObject_DelItem is called with an error check.
    """
    self.generate_subexpr_evaluation_code(code)
    if self.base.type.is_cyp_class:
        self.generate_cyp_class_delitem_code(code)
    else:
        #if self.type.is_pyobject:
        if self.index.type.is_int:
            function = "__Pyx_DelItemInt"
            index_code = self.index.result()
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
        else:
            index_code = self.index.py_result()
            if self.base.type is dict_type:
                function = "PyDict_DelItem"
            else:
                function = "PyObject_DelItem"
        code.putln(code.error_goto_if_neg(
            "%s(%s, %s%s)" % (
                function,
                self.base.py_result(),
                index_code,
                self.extra_index_params(code)),
            self.pos))
    self.generate_subexpr_disposal_code(code)
    self.free_subexpr_temps(code)
class BufferIndexNode(_IndexingBaseNode):
    """
    Indexing of buffers and memoryviews. This node is created during type
    analysis from IndexNode and replaces it.

    Attributes:
        base - base node being indexed
        indices - list of indexing expressions
    """

    subexprs = ['base', 'indices']

    is_buffer_access = True

    # Whether we're assigning to a buffer (in that case it needs to be writable)
    writable_needed = False

    # Any indexing temp variables that we need to clean up.
    index_temps = ()

    def analyse_target_types(self, env):
        # NOTE(review): unlike analyse_types() this does not return the node;
        # the node appears to be mutated in place — confirm callers rely on that.
        self.analyse_types(env, getting=False)

    def analyse_types(self, env, getting=True):
        """
        Analyse types for buffer indexing only. Overridden by memoryview
        indexing and slicing subclasses
        """
        # self.indices are already analyzed
        if not self.base.is_name and not is_pythran_expr(self.base.type):
            error(self.pos, "Can only index buffer variables")
            self.type = error_type
            return self

        if not getting:
            if not self.base.entry.type.writable:
                error(self.pos, "Writing to readonly buffer")
            else:
                self.writable_needed = True
                if self.base.type.is_buffer:
                    self.base.entry.buffer_aux.writable_needed = True

        self.none_error_message = "'NoneType' object is not subscriptable"
        self.analyse_buffer_index(env, getting)
        self.wrap_in_nonecheck_node(env)
        return self

    def analyse_buffer_index(self, env, getting):
        # Determine the result type of the buffer access.
        if is_pythran_expr(self.base.type):
            index_with_type_list = [(idx, idx.type) for idx in self.indices]
            self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
        else:
            self.base = self.base.coerce_to_simple(env)
            self.type = self.base.type.dtype
        self.buffer_type = self.base.type

        if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
            # Object values must be fetched into a reference-counted temp.
            self.is_temp = True

    def analyse_assignment(self, rhs):
        """
        Called by IndexNode when this node is assigned to,
        with the rhs of the assignment
        """

    def wrap_in_nonecheck_node(self, env):
        # Guard against indexing a None base when 'nonecheck' is active.
        if not env.directives['nonecheck'] or not self.base.may_be_none():
            return
        self.base = self.base.as_none_safe_node(self.none_error_message)

    def nogil_check(self, env):
        # Object-dtype buffer access needs the GIL for refcounting.
        if self.is_buffer_access or self.is_memview_index:
            if self.type.is_pyobject:
                error(self.pos, "Cannot access buffer with object dtype without gil")
                self.type = error_type

    def calculate_result_code(self):
        # The looked-up element pointer is dereferenced in place.
        return "(*%s)" % self.buffer_ptr_code

    def buffer_entry(self):
        # Unwrap a none-check node to reach the underlying buffer entry.
        base = self.base
        if self.base.is_nonecheck:
            base = base.arg
        return base.type.get_entry(base)

    def get_index_in_temp(self, code, ivar):
        # Copy an index into a temp wide enough for further index arithmetic.
        ret = code.funcstate.allocate_temp(
            PyrexTypes.widest_numeric_type(
                ivar.type,
                PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
            manage_ref=False)
        code.putln("%s = %s;" % (ret, ivar.result()))
        return ret

    def buffer_lookup_code(self, code):
        """
        ndarray[1, 2, 3] and memslice[1, 2, 3]
        """
        if self.in_nogil_context:
            if self.is_buffer_access or self.is_memview_index:
                if code.globalstate.directives['boundscheck']:
                    warning(self.pos, "Use boundscheck(False) for faster access", level=1)

        # Assign indices to temps of at least (s)size_t to allow further index calculations.
        self.index_temps = index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]

        # Generate buffer access code using these temps
        from . import Buffer
        buffer_entry = self.buffer_entry()
        if buffer_entry.type.is_buffer:
            negative_indices = buffer_entry.type.negative_indices
        else:
            negative_indices = Buffer.buffer_defaults['negative_indices']

        return buffer_entry, Buffer.put_buffer_lookup_code(
            entry=buffer_entry,
            index_signeds=[ivar.type.signed for ivar in self.indices],
            index_cnames=index_temps,
            directives=code.globalstate.directives,
            pos=self.pos, code=code,
            negative_indices=negative_indices,
            in_nogil_context=self.in_nogil_context)

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
        # Evaluate subexpressions, store the rhs, then clean up.
        self.generate_subexpr_evaluation_code(code)
        self.generate_buffer_setitem_code(rhs, code)
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)

    def generate_buffer_setitem_code(self, rhs, code, op=""):
        """Store (or in-place combine, via *op*) the rhs into the buffer element."""
        base_type = self.base.type
        if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
            obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
            # We have got to do this because we have to declare pythran objects
            # at the beginning of the functions.
            # Indeed, Cython uses "goto" statement for error management, and
            # RAII doesn't work with that kind of construction.
            # Moreover, the way Pythran expressions are made is that they don't
            # support move-assignation easily.
            # Thus, we explicitly destroy and then in-place construct new
            # objects in this case.
            code.putln("__Pyx_call_destructor(%s);" % obj)
            code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
            code.putln("%s%s %s= %s;" % (
                obj,
                pythran_indexing_code(self.indices),
                op,
                rhs.pythran_result()))
            code.funcstate.release_temp(obj)
            return

        # Used from generate_assignment_code and InPlaceAssignmentNode
        buffer_entry, ptrexpr = self.buffer_lookup_code(code)

        if self.buffer_type.dtype.is_pyobject:
            # Must manage refcounts. Decref what is already there
            # and incref what we put in.
            ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
                                               manage_ref=False)
            rhs_code = rhs.result()
            code.putln("%s = %s;" % (ptr, ptrexpr))
            code.put_gotref("*%s" % ptr, self.buffer_type.dtype)
            code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
                rhs_code, ptr))
            code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
            code.put_giveref("*%s" % ptr, self.buffer_type.dtype)
            code.funcstate.release_temp(ptr)
        else:
            # Simple case
            code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))

    def generate_result_code(self, code):
        if is_pythran_expr(self.base.type):
            # Destroy and in-place construct the Pythran result object.
            res = self.result()
            code.putln("__Pyx_call_destructor(%s);" % res)
            code.putln("new (&%s) decltype(%s){%s%s};" % (
                res,
                res,
                self.base.pythran_result(),
                pythran_indexing_code(self.indices)))
            return
        buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
        if self.type.is_pyobject:
            # is_temp is True, so must pull out value and incref it.
            # NOTE: object temporary results for nodes are declared
            # as PyObject *, so we need a cast
            code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code))
            code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())

    def free_subexpr_temps(self, code):
        # Release the index temps allocated in buffer_lookup_code().
        for temp in self.index_temps:
            code.funcstate.release_temp(temp)
        self.index_temps = ()
        super(BufferIndexNode, self).free_subexpr_temps(code)
class MemoryViewIndexNode(BufferIndexNode):
    """Indexing of a memoryview slice; may turn out to be a sub-slicing."""

    is_memview_index = True
    is_buffer_access = False

    # Class-wide flag: the untyped-index warning is only emitted once.
    warned_untyped_idx = False

    def analyse_types(self, env, getting=True):
        """Analyse the indices against the memoryview's axes.

        None indices introduce new axes, slice indices make this a
        memoryview slicing operation, and integer/object indices select
        single elements.  Delegates the result typing to
        analyse_operation().
        """
        # memoryviewslice indexing or slicing
        from . import MemoryView

        self.is_pythran_mode = has_np_pythran(env)
        indices = self.indices
        have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)

        if not getting:
            self.writable_needed = True
            if self.base.is_name or self.base.is_attribute:
                self.base.entry.type.writable_needed = True

        self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
        axes = []

        index_type = PyrexTypes.c_py_ssize_t_type
        new_indices = []

        if len(indices) - len(newaxes) > self.base.type.ndim:
            self.type = error_type
            error(indices[self.base.type.ndim].pos,
                  "Too many indices specified for type %s" % self.base.type)
            return self

        axis_idx = 0
        for i, index in enumerate(indices[:]):
            index = index.analyse_types(env)
            if index.is_none:
                # None inserts a new axis ("newaxis" semantics).
                self.is_memview_slice = True
                new_indices.append(index)
                axes.append(('direct', 'strided'))
                continue

            access, packing = self.base.type.axes[axis_idx]
            axis_idx += 1

            if index.is_slice:
                self.is_memview_slice = True
                if index.step.is_none:
                    axes.append((access, packing))
                else:
                    # A stepped slice loses the contiguity of the axis.
                    axes.append((access, 'strided'))

                # Coerce start, stop and step to temps of the right type
                for attr in ('start', 'stop', 'step'):
                    value = getattr(index, attr)
                    if not value.is_none:
                        value = value.coerce_to(index_type, env)
                        #value = value.coerce_to_temp(env)
                        setattr(index, attr, value)
                        new_indices.append(value)

            elif index.type.is_int or index.type.is_pyobject:
                if index.type.is_pyobject and not self.warned_untyped_idx:
                    warning(index.pos, "Index should be typed for more efficient access", level=2)
                    MemoryViewIndexNode.warned_untyped_idx = True

                self.is_memview_index = True
                index = index.coerce_to(index_type, env)
                indices[i] = index
                new_indices.append(index)

            else:
                self.type = error_type
                error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
                return self

        ### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
        self.is_memview_index = self.is_memview_index and not self.is_memview_slice
        self.indices = new_indices
        # All indices with all start/stop/step for slices.
        # We need to keep this around.
        self.original_indices = indices
        self.nogil = env.nogil

        self.analyse_operation(env, getting, axes)
        self.wrap_in_nonecheck_node(env)
        return self

    def analyse_operation(self, env, getting, axes):
        # Plain element access: behave like a buffer index.
        self.none_error_message = "Cannot index None memoryview slice"
        self.analyse_buffer_index(env, getting)

    def analyse_broadcast_operation(self, rhs):
        """
        Support broadcasting for slice assignment.
        E.g.
            m_2d[...] = m_1d  # or,
            m_1d[...] = m_2d  # if the leading dimension has extent 1
        """
        if self.type.is_memoryviewslice:
            lhs = self
            if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
                lhs.is_memview_broadcast = True
                rhs.is_memview_broadcast = True

    def analyse_as_memview_scalar_assignment(self, rhs):
        # Delegate to analyse_assignment(); propagate the copy-assignment flag.
        lhs = self.analyse_assignment(rhs)
        if lhs:
            rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
            return lhs
        return self
class MemoryViewSliceNode(MemoryViewIndexNode):
is_memview_slice = True
# No-op slicing operation, this node will be replaced
is_ellipsis_noop = False
is_memview_scalar_assignment = False
is_memview_index = False
is_memview_broadcast = False
def analyse_ellipsis_noop(self, env, getting):
    """Slicing operations needing no evaluation, i.e. m[...] or m[:, :]"""
    ### FIXME: replace directly
    def full_slice(index):
        return (index.is_slice and index.start.is_none
                and index.stop.is_none and index.step.is_none)
    self.is_ellipsis_noop = all(map(full_slice, self.indices))
    if self.is_ellipsis_noop:
        # A no-op slice has the very same type as its base.
        self.type = self.base.type
def analyse_operation(self, env, getting, axes):
    """Analyse a memoryview slicing operation.

    Detects the no-op case (m[...]), otherwise validates the resulting
    axes and builds the sliced memoryview type; the base is forced into
    a temp if it is not simple.
    """
    from . import MemoryView

    if not getting:
        self.is_memview_broadcast = True
        self.none_error_message = "Cannot assign to None memoryview slice"
    else:
        self.none_error_message = "Cannot slice None memoryview slice"

    self.analyse_ellipsis_noop(env, getting)
    if self.is_ellipsis_noop:
        return

    self.index = None
    self.is_temp = True
    self.use_managed_ref = True

    if not MemoryView.validate_axes(self.pos, axes):
        self.type = error_type
        return

    self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)

    if not (self.base.is_simple() or self.base.result_in_temp()):
        self.base = self.base.coerce_to_temp(env)
def analyse_assignment(self, rhs):
if not rhs.type.is_memoryviewslice and (
self.type.dtype.assignable_from(rhs.type) or
rhs.type.is_pyobject):
# scalar assignment
return MemoryCopyScalar(self.pos, self)
else:
return MemoryCopySlice(self.pos, self)
def merged_indices(self, indices):
"""Return a new list of indices/slices with 'indices' merged into the current ones
according to slicing rules.
Is used to implement "view[i][j]" => "view[i, j]".
Return None if the indices cannot (easily) be merged at compile time.
"""
if not indices:
return None
# NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
new_indices = self.original_indices[:]
indices = indices[:]
for i, s in enumerate(self.original_indices):
if s.is_slice:
if s.start.is_none and s.stop.is_none and s.step.is_none:
# Full slice found, replace by index.
new_indices[i] = indices[0]
indices.pop(0)
if not indices:
return new_indices
else:
# Found something non-trivial, e.g. a partial slice.
return None
elif not s.type.is_int:
# Not a slice, not an integer index => could be anything...
return None
if indices:
if len(new_indices) + len(indices) > self.base.type.ndim:
return None
new_indices += indices
return new_indices
def is_simple(self):
if self.is_ellipsis_noop:
# TODO: fix SimpleCallNode.is_simple()
return self.base.is_simple() or self.base.result_in_temp()
return self.result_in_temp()
def calculate_result_code(self):
"""This is called in case this is a no-op slicing node"""
return self.base.result()
def generate_result_code(self, code):
if self.is_ellipsis_noop:
return ### FIXME: remove
buffer_entry = self.buffer_entry()
have_gil = not self.in_nogil_context
# TODO Mark: this is insane, do it better
have_slices = False
it = iter(self.indices)
for index in self.original_indices:
if index.is_slice:
have_slices = True
if not index.start.is_none:
index.start = next(it)
if not index.stop.is_none:
index.stop = next(it)
if not index.step.is_none:
index.step = next(it)
else:
next(it)
assert not list(it)
buffer_entry.generate_buffer_slice_code(
code, self.original_indices, self.result(), self.type,
have_gil=have_gil, have_slices=have_slices,
directives=code.globalstate.directives)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
if self.is_ellipsis_noop:
self.generate_subexpr_evaluation_code(code)
else:
self.generate_evaluation_code(code)
if self.is_memview_scalar_assignment:
self.generate_memoryviewslice_assign_scalar_code(rhs, code)
else:
self.generate_memoryviewslice_setslice_code(rhs, code)
if self.is_ellipsis_noop:
self.generate_subexpr_disposal_code(code)
else:
self.generate_disposal_code(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
class MemoryCopyNode(ExprNode):
    """
    Wraps a memoryview slice for slice assignment.

        dst: destination memoryview slice
    """
    subexprs = ['dst']

    def __init__(self, pos, dst):
        super(MemoryCopyNode, self).__init__(pos)
        self.dst = dst
        # Default result type is the destination slice type; subclasses may
        # override (MemoryCopyScalar sets the element dtype instead).
        self.type = dst.type

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
        # Evaluate the destination, delegate the actual copy to the subclass
        # hook _generate_assignment_code(), then dispose of both sides.
        self.dst.generate_evaluation_code(code)
        self._generate_assignment_code(rhs, code)
        self.dst.generate_disposal_code(code)
        self.dst.free_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
class MemoryCopySlice(MemoryCopyNode):
    """
    Copy the contents of slice src to slice dst. Does not support indirect
    slices.

        memslice1[...] = memslice2
        memslice1[:] = memslice2
    """
    is_memview_copy_assignment = True
    copy_slice_cname = "__pyx_memoryview_copy_contents"

    def _generate_assignment_code(self, src, code):
        # Emit a call to the slice-copy utility function; a negative return
        # value signals an error.
        dst = self.dst

        src.type.assert_direct_dims(src.pos)
        dst.type.assert_direct_dims(dst.pos)

        code.putln(code.error_goto_if_neg(
            "%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
                src.result(), dst.result(),
                src.type.ndim, dst.type.ndim,
                dst.type.dtype.is_pyobject),
            dst.pos))
class MemoryCopyScalar(MemoryCopyNode):
    """
    Assign a scalar to a slice. dst must be simple, scalar will be assigned
    to a correct type and not just something assignable.

        memslice1[...] = 0.0
        memslice1[:] = 0.0
    """

    def __init__(self, pos, dst):
        super(MemoryCopyScalar, self).__init__(pos, dst)
        # The result type is the element dtype, not the slice type.
        self.type = dst.type.dtype

    def _generate_assignment_code(self, scalar, code):
        # Emit a loop nest that stores the scalar into every element of dst.
        from . import MemoryView

        self.dst.type.assert_direct_dims(self.dst.pos)

        dtype = self.dst.type.dtype
        type_decl = dtype.declaration_code("")
        slice_decl = self.dst.type.declaration_code("")

        code.begin_block()
        code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
        if self.dst.result_in_temp() or self.dst.is_simple():
            dst_temp = self.dst.result()
        else:
            # Evaluate the destination once into a local slice variable.
            code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
            dst_temp = "__pyx_temp_slice"

        force_strided = False
        indices = self.dst.original_indices
        for idx in indices:
            if isinstance(idx, SliceNode) and not (idx.start.is_none and
                                                   idx.stop.is_none and
                                                   idx.step.is_none):
                # A non-trivial (partial) slice => iterate with explicit strides.
                force_strided = True

        slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
                                               self.dst.type.ndim, code,
                                               force_strided=force_strided)
        p = slice_iter_obj.start_loops()

        if dtype.is_pyobject:
            # Drop the reference to the object being overwritten.
            code.putln("Py_DECREF(*(PyObject **) %s);" % p)

        code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))

        if dtype.is_pyobject:
            code.putln("Py_INCREF(__pyx_temp_scalar);")

        slice_iter_obj.end_loops()
        code.end_block()
class SliceIndexNode(ExprNode):
    # 2-element slice indexing, i.e. base[start:stop]
    #
    # base ExprNode
    # start ExprNode or None
    # stop ExprNode or None
    # slice ExprNode or None constant slice object
    subexprs = ['base', 'start', 'stop', 'slice']

    # Cached constant slice object; set by analyse_types() when both bounds
    # are literals (or absent) and the result is a generic Python object.
    slice = None

    def infer_type(self, env):
        # Infer the result type of base[start:stop] without full analysis.
        base_type = self.base.infer_type(env)
        if base_type.is_string or base_type.is_cpp_class:
            return bytes_type
        elif base_type.is_pyunicode_ptr:
            return unicode_type
        elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
                           basestring_type, list_type, tuple_type):
            # Slicing these builtin types returns the same type.
            return base_type
        elif base_type.is_ptr or base_type.is_array:
            return PyrexTypes.c_array_type(base_type.base_type, None)
        return py_object_type

    def inferable_item_node(self, index=0):
        # slicing shouldn't change the result type of the base, but the index might
        if index is not not_a_constant and self.start:
            if self.start.has_constant_result():
                index += self.start.constant_result
            else:
                index = not_a_constant
        return self.base.inferable_item_node(index)

    def may_be_none(self):
        # Slicing a string or one of these builtin containers never yields None.
        base_type = self.base.type
        if base_type:
            if base_type.is_string:
                return False
            if base_type in (bytes_type, str_type, unicode_type,
                             basestring_type, list_type, tuple_type):
                return False
        return ExprNode.may_be_none(self)

    def calculate_constant_result(self):
        # Fold the slice when base/start/stop all have constant results.
        if self.start is None:
            start = None
        else:
            start = self.start.constant_result
        if self.stop is None:
            stop = None
        else:
            stop = self.stop.constant_result
        self.constant_result = self.base.constant_result[start:stop]

    def compile_time_value(self, denv):
        # Evaluate the slice at compile time (DEF/IF expressions).
        base = self.base.compile_time_value(denv)
        if self.start is None:
            start = 0
        else:
            start = self.start.compile_time_value(denv)
        if self.stop is None:
            stop = None
        else:
            stop = self.stop.compile_time_value(denv)
        try:
            return base[start:stop]
        except Exception as e:
            self.compile_time_value_error(e)

    def analyse_target_declaration(self, env):
        pass

    def analyse_target_types(self, env):
        node = self.analyse_types(env, getting=False)
        # when assigning, we must accept any Python type
        if node.type.is_pyobject:
            node.type = py_object_type
        return node

    def analyse_types(self, env, getting=True):
        self.base = self.base.analyse_types(env)

        if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
            # Buffer/memoryview slicing is handled by IndexNode => delegate
            # by rebuilding this slice as an IndexNode with a SliceNode index.
            none_node = NoneNode(self.pos)
            index = SliceNode(self.pos,
                              start=self.start or none_node,
                              stop=self.stop or none_node,
                              step=none_node)
            index_node = IndexNode(self.pos, index=index, base=self.base)
            return index_node.analyse_base_and_index_types(
                env, getting=getting, setting=not getting,
                analyse_base=False)

        if self.start:
            self.start = self.start.analyse_types(env)
        if self.stop:
            self.stop = self.stop.analyse_types(env)

        if not env.directives['wraparound']:
            check_negative_indices(self.start, self.stop)

        base_type = self.base.type
        if base_type.is_array and not getting:
            # cannot assign directly to C array => try to assign by making a copy
            if not self.start and not self.stop:
                self.type = base_type
            else:
                self.type = PyrexTypes.CPtrType(base_type.base_type)
        elif base_type.is_string or base_type.is_cpp_string:
            self.type = default_str_type(env)
        elif base_type.is_pyunicode_ptr:
            self.type = unicode_type
        elif base_type.is_ptr:
            self.type = base_type
        elif base_type.is_array:
            # we need a ptr type here instead of an array type, as
            # array types can result in invalid type casts in the C
            # code
            self.type = PyrexTypes.CPtrType(base_type.base_type)
        else:
            self.base = self.base.coerce_to_pyobject(env)
            self.type = py_object_type
        if base_type.is_builtin_type:
            # slicing builtin types returns something of the same type
            self.type = base_type
            self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")

        if self.type is py_object_type:
            if (not self.start or self.start.is_literal) and \
                    (not self.stop or self.stop.is_literal):
                # cache the constant slice object, in case we need it
                none_node = NoneNode(self.pos)
                self.slice = SliceNode(
                    self.pos,
                    start=copy.deepcopy(self.start or none_node),
                    stop=copy.deepcopy(self.stop or none_node),
                    step=none_node
                ).analyse_types(env)
        else:
            c_int = PyrexTypes.c_py_ssize_t_type

            def allow_none(node, default_value, env):
                # Coerce to Py_ssize_t, but allow None as meaning the default slice bound.
                from .UtilNodes import EvalWithTempExprNode, ResultRefNode

                node_ref = ResultRefNode(node)
                new_expr = CondExprNode(
                    node.pos,
                    true_val=IntNode(
                        node.pos,
                        type=c_int,
                        value=default_value,
                        constant_result=int(default_value) if default_value.isdigit() else not_a_constant,
                    ),
                    false_val=node_ref.coerce_to(c_int, env),
                    test=PrimaryCmpNode(
                        node.pos,
                        operand1=node_ref,
                        operator='is',
                        operand2=NoneNode(node.pos),
                    ).analyse_types(env)
                ).analyse_result_type(env)
                return EvalWithTempExprNode(node_ref, new_expr)

            if self.start:
                if self.start.type.is_pyobject:
                    self.start = allow_none(self.start, '0', env)
                self.start = self.start.coerce_to(c_int, env)
            if self.stop:
                if self.stop.type.is_pyobject:
                    self.stop = allow_none(self.stop, 'PY_SSIZE_T_MAX', env)
                self.stop = self.stop.coerce_to(c_int, env)
        self.is_temp = 1
        return self

    def analyse_as_type(self, env):
        # "T[:]" used in type position denotes a 1-dim memoryview slice type.
        base_type = self.base.analyse_as_type(env)
        if base_type and not base_type.is_pyobject:
            if not self.start and not self.stop:
                # memory view
                from . import MemoryView
                env.use_utility_code(MemoryView.view_utility_code)
                none_node = NoneNode(self.pos)
                slice_node = SliceNode(
                    self.pos,
                    start=none_node,
                    stop=none_node,
                    step=none_node,
                )
                return PyrexTypes.MemoryViewSliceType(
                    base_type, MemoryView.get_axes_specs(env, [slice_node]))
        return None

    nogil_check = Node.gil_error
    gil_message = "Slicing Python object"

    get_slice_utility_code = TempitaUtilityCode.load(
        "SliceObject", "ObjectHandling.c", context={'access': 'Get'})

    set_slice_utility_code = TempitaUtilityCode.load(
        "SliceObject", "ObjectHandling.c", context={'access': 'Set'})

    def coerce_to(self, dst_type, env):
        if ((self.base.type.is_string or self.base.type.is_cpp_string)
                and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
            if (dst_type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(self.pos,
                    "default encoding required for conversion from '%s' to '%s'" %
                    (self.base.type, dst_type))
            self.type = dst_type
        if dst_type.is_array and self.base.type.is_array:
            if not self.start and not self.stop:
                # redundant slice building, copy C arrays directly
                return self.base.coerce_to(dst_type, env)
            # else: check array size if possible
        return super(SliceIndexNode, self).coerce_to(dst_type, env)

    def generate_result_code(self, code):
        # Emit the C code computing the slice into self.result().
        if not self.type.is_pyobject:
            error(self.pos,
                "Slicing is not currently supported for '%s'." % self.type)
            return

        base_result = self.base.result()
        result = self.result()
        start_code = self.start_code()
        stop_code = self.stop_code()
        if self.base.type.is_string:
            base_result = self.base.result()
            if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
                base_result = '((const char*)%s)' % base_result
            if self.type is bytearray_type:
                type_name = 'ByteArray'
            else:
                type_name = self.type.name.title()
            if self.stop is None:
                # Open-ended slice => copy up to the terminating NUL.
                code.putln(
                    "%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
        elif self.base.type.is_pyunicode_ptr:
            base_result = self.base.result()
            if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
                base_result = '((const Py_UNICODE*)%s)' % base_result
            if self.stop is None:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
        elif self.base.type is unicode_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
            code.putln(
                "%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
                    result,
                    base_result,
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))
        elif self.type is py_object_type:
            code.globalstate.use_utility_code(self.get_slice_utility_code)
            (has_c_start, has_c_stop, c_start, c_stop,
             py_start, py_stop, py_slice) = self.get_slice_config()
            code.putln(
                "%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
                    result,
                    self.base.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound']),
                    code.error_goto_if_null(result, self.pos)))
        else:
            if self.base.type is list_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyList_GetSlice'
            elif self.base.type is tuple_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyTuple_GetSlice'
            else:
                cfunc = 'PySequence_GetSlice'
            code.putln(
                "%s = %s(%s, %s, %s); %s" % (
                    result,
                    cfunc,
                    self.base.py_result(),
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))

        self.generate_gotref(code)

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                                 exception_check=None, exception_value=None):
        # Emit code for "base[start:stop] = rhs".
        self.generate_subexpr_evaluation_code(code)
        if self.type.is_pyobject:
            code.globalstate.use_utility_code(self.set_slice_utility_code)
            has_c_start, has_c_stop, c_start, c_stop, py_start, py_stop, py_slice = self.get_slice_config()
            code.put_error_if_neg(self.pos,
                "__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
                    self.base.py_result(),
                    rhs.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound'])))
        else:
            # C array assignment: memcpy the element range.
            start_offset = self.start_code() if self.start else '0'
            if rhs.type.is_array:
                array_length = rhs.type.size
                self.generate_slice_guard_code(code, array_length)
            else:
                array_length = '%s - %s' % (self.stop_code(), start_offset)

            # Manage reference count of elements of cypclass arrays => DECREF overwritten references
            if (self.type.is_array or self.type.is_ptr) and self.type.base_type.is_cyp_class:
                loop_idx = code.funcstate.allocate_temp(PyrexTypes.c_size_t_type, manage_ref=False)
                code.putln("for (%s = %s; %s < %s; %s++) {" % (loop_idx, start_offset, loop_idx, array_length, loop_idx))
                code.putln("Cy_XDECREF(%s[%s]);" % (self.base.result(), loop_idx))
                code.putln("}")
                code.funcstate.release_temp(loop_idx)

            code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
            code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
                self.base.result(), start_offset,
                rhs.result(),
                self.base.result(), array_length
            ))

            # Manage reference count of elements of cypclass arrays => INCREF copied references
            if (self.type.is_array or self.type.is_ptr) and self.type.base_type.is_cyp_class:
                loop_idx = code.funcstate.allocate_temp(PyrexTypes.c_size_t_type, manage_ref=False)
                code.putln("for (%s = %s; %s < %s; %s++) {" % (loop_idx, start_offset, loop_idx, array_length, loop_idx))
                code.putln("Cy_INCREF(%s[%s]);" % (self.base.result(), loop_idx))
                code.putln("}")
                code.funcstate.release_temp(loop_idx)

        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)

    def generate_deletion_code(self, code, ignore_nonexisting=False):
        # Emit code for "del base[start:stop]" (Python objects only).
        if not self.base.type.is_pyobject:
            error(self.pos,
                "Deleting slices is only supported for Python types, not '%s'." % self.type)
            return
        self.generate_subexpr_evaluation_code(code)
        code.globalstate.use_utility_code(self.set_slice_utility_code)
        (has_c_start, has_c_stop, c_start, c_stop,
         py_start, py_stop, py_slice) = self.get_slice_config()
        code.put_error_if_neg(self.pos,
            "__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
                self.base.py_result(),
                c_start, c_stop,
                py_start, py_stop, py_slice,
                has_c_start, has_c_stop,
                bool(code.globalstate.directives['wraparound'])))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)

    def get_slice_config(self):
        # For each bound, provide either its C expression (has_c_* True) or a
        # pointer to its Python object, plus an optional cached slice object.
        has_c_start, c_start, py_start = False, '0', 'NULL'
        if self.start:
            has_c_start = not self.start.type.is_pyobject
            if has_c_start:
                c_start = self.start.result()
            else:
                py_start = '&%s' % self.start.py_result()
        has_c_stop, c_stop, py_stop = False, '0', 'NULL'
        if self.stop:
            has_c_stop = not self.stop.type.is_pyobject
            if has_c_stop:
                c_stop = self.stop.result()
            else:
                py_stop = '&%s' % self.stop.py_result()
        py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
        return (has_c_start, has_c_stop, c_start, c_stop,
                py_start, py_stop, py_slice)

    def generate_slice_guard_code(self, code, target_size):
        # Verify that the assigned C array has exactly the length of the
        # target slice; check at compile time where the bounds are constants,
        # otherwise emit a runtime check that raises ValueError.
        if not self.base.type.is_array:
            return
        slice_size = self.base.type.size
        try:
            total_length = slice_size = int(slice_size)
        except ValueError:
            total_length = None

        start = stop = None
        if self.stop:
            stop = self.stop.result()
            try:
                stop = int(stop)
                if stop < 0:
                    if total_length is None:
                        slice_size = '%s + %d' % (slice_size, stop)
                    else:
                        slice_size += stop
                else:
                    slice_size = stop
                stop = None
            except ValueError:
                pass

        if self.start:
            start = self.start.result()
            try:
                start = int(start)
                if start < 0:
                    if total_length is None:
                        start = '%s + %d' % (self.base.type.size, start)
                    else:
                        start += total_length
                if isinstance(slice_size, _py_int_types):
                    slice_size -= start
                else:
                    slice_size = '%s - (%s)' % (slice_size, start)
                start = None
            except ValueError:
                pass

        runtime_check = None
        compile_time_check = False
        try:
            int_target_size = int(target_size)
        except ValueError:
            int_target_size = None
        else:
            compile_time_check = isinstance(slice_size, _py_int_types)

        if compile_time_check and slice_size < 0:
            if int_target_size > 0:
                error(self.pos, "Assignment to empty slice.")
        elif compile_time_check and start is None and stop is None:
            # we know the exact slice length
            if int_target_size != slice_size:
                error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
                    slice_size, target_size))
        elif start is not None:
            if stop is None:
                stop = slice_size
            runtime_check = "(%s)-(%s)" % (stop, start)
        elif stop is not None:
            runtime_check = stop
        else:
            runtime_check = slice_size

        if runtime_check:
            code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
            code.putln(
                'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
                ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
                ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
                    target_size, runtime_check))
            code.putln(code.error_goto(self.pos))
            code.putln("}")

    def start_code(self):
        # C expression for the start bound, defaulting to 0.
        if self.start:
            return self.start.result()
        else:
            return "0"

    def stop_code(self):
        # C expression for the stop bound, defaulting to the full array size
        # for C arrays, otherwise PY_SSIZE_T_MAX.
        if self.stop:
            return self.stop.result()
        elif self.base.type.is_array:
            return self.base.type.size
        else:
            return "PY_SSIZE_T_MAX"

    def calculate_result_code(self):
        # self.result() is not used, but this method must exist
        return ""
class SliceNode(ExprNode):
    # start:stop:step in subscript list
    #
    # start ExprNode
    # stop ExprNode
    # step ExprNode
    subexprs = ['start', 'stop', 'step']
    is_slice = True
    type = slice_type
    is_temp = 1

    def calculate_constant_result(self):
        # Fold into a real slice object when all three parts are constant.
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)

    def compile_time_value(self, denv):
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception as e:
            self.compile_time_value_error(e)

    def may_be_none(self):
        # A slice object itself is never None.
        return False

    def analyse_types(self, env):
        start = self.start.analyse_types(env)
        stop = self.stop.analyse_types(env)
        step = self.step.analyse_types(env)
        self.start = start.coerce_to_pyobject(env)
        self.stop = stop.coerce_to_pyobject(env)
        self.step = step.coerce_to_pyobject(env)
        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            # All-literal slices are turned into a cached constant.
            self.is_literal = True
            self.is_temp = False
        return self

    gil_message = "Constructing Python slice object"

    def calculate_result_code(self):
        return self.result_code

    def generate_result_code(self, code):
        if self.is_literal:
            # Build the slice once in the cached-constants section and reuse it.
            dedup_key = make_dedup_key(self.type, (self,))
            self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2, dedup_key=dedup_key)
            code = code.get_cached_constants_writer(self.result_code)
            if code is None:
                return  # already initialised
            code.mark_pos(self.pos)

        code.putln(
            "%s = PySlice_New(%s, %s, %s); %s" % (
                self.result(),
                self.start.py_result(),
                self.stop.py_result(),
                self.step.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
        if self.is_literal:
            self.generate_giveref(code)
class SliceIntNode(SliceNode):
    # start:stop:step in subscript list
    # This is just a node to hold start,stop and step nodes that can be
    # converted to integers. This does not generate a slice python object.
    #
    # start ExprNode
    # stop ExprNode
    # step ExprNode
    is_temp = 0

    def calculate_constant_result(self):
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)

    def compile_time_value(self, denv):
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception as e:
            self.compile_time_value_error(e)

    def may_be_none(self):
        return False

    def analyse_types(self, env):
        # Unlike SliceNode, coerce each present bound to a C integer rather
        # than to a Python object.
        self.start = self.start.analyse_types(env)
        self.stop = self.stop.analyse_types(env)
        self.step = self.step.analyse_types(env)

        if not self.start.is_none:
            self.start = self.start.coerce_to_integer(env)
        if not self.stop.is_none:
            self.stop = self.stop.coerce_to_integer(env)
        if not self.step.is_none:
            self.step = self.step.coerce_to_integer(env)

        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            self.is_literal = True
            self.is_temp = False
        return self

    def calculate_result_code(self):
        # No single result expression: the bounds are consumed individually.
        pass

    def generate_result_code(self, code):
        for a in self.start, self.stop, self.step:
            if isinstance(a, CloneNode):
                a.arg.result()
class CallNode(ExprNode):
    # Base class for function call nodes.

    # allow overriding the default 'may_be_none' behaviour
    may_return_none = None

    def infer_type(self, env):
        # Infer the call's result type without full analysis.
        # TODO(robertwb): Reduce redundancy with analyse_types.
        function = self.function
        func_type = function.infer_type(env)
        if isinstance(function, NewExprNode):
            # note: needs call to infer_type() above
            if function.class_type.is_cyp_class:
                return function.class_type
            else:
                return PyrexTypes.CPtrType(function.class_type)
        if func_type is py_object_type:
            # function might have lied for safety => try to find better type
            entry = getattr(function, 'entry', None)
            if entry is not None:
                func_type = entry.type or func_type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if func_type.is_cfunction or func_type.is_cyp_class:
            alternatives = None
            cypclass_wrapper_entry = None
            if func_type.is_cyp_class:
                # NOTE(review): empty-name lookup — possibly a stripped
                # "<...>" special name in this copy; verify against upstream.
                cypclass_wrapper_entry = func_type.scope.lookup_here("")
                func_type = cypclass_wrapper_entry.type
            if hasattr(self, 'args'):
                if cypclass_wrapper_entry:
                    alternatives = cypclass_wrapper_entry.all_alternatives()
                elif getattr(self.function, 'entry', None):
                    alternatives = self.function.entry.all_alternatives()
            if alternatives:
                # Overloaded function => pick the best match for the
                # inferred argument types.
                arg_types = [arg.infer_type(env) for arg in self.args]
                func_entry = PyrexTypes.best_match(arg_types, alternatives)
                if func_entry:
                    func_type = func_entry.type
                    if func_type.is_ptr:
                        func_type = func_type.base_type
                    return func_type.return_type
            return func_type.return_type
        elif func_type is type_type:
            if function.is_name and function.entry and function.entry.type:
                result_type = function.entry.type
                if result_type.is_extension_type:
                    return result_type
                elif result_type.is_builtin_type:
                    if function.entry.name == 'float':
                        return PyrexTypes.c_double_type
                    elif function.entry.name in Builtin.types_that_construct_their_instance:
                        return result_type
        func_type = self.function.analyse_as_type(env)
        if func_type and (func_type.is_struct_or_union or func_type.is_cpp_class):
            return func_type
        return py_object_type

    def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code merged in to handle the
        # the case of function overloading.
        return self.function.type_dependencies(env)

    def is_simple(self):
        # C function calls could be considered simple, but they may
        # have side-effects that may hit when multiple operations must
        # be effected in order, e.g. when constructing the argument
        # sequence for a function call or comparing values.
        return False

    def may_be_none(self):
        if self.may_return_none is not None:
            return self.may_return_none
        func_type = self.function.type
        if func_type is type_type and self.function.is_name:
            entry = self.function.entry
            if entry.type.is_extension_type:
                return False
            if (entry.type.is_builtin_type and
                    entry.name in Builtin.types_that_construct_their_instance):
                return False
        return ExprNode.may_be_none(self)

    def set_py_result_type(self, function, func_type=None):
        # Set self.type for a Python-level call, specialising for builtin
        # type constructors and extension type constructors.
        if func_type is None:
            func_type = function.type
        if func_type is Builtin.type_type and (
                function.is_name and
                function.entry and
                function.entry.is_builtin and
                function.entry.name in Builtin.types_that_construct_their_instance):
            # calling a builtin type that returns a specific object type
            if function.entry.name == 'float':
                # the following will come true later on in a transform
                self.type = PyrexTypes.c_double_type
                self.result_ctype = PyrexTypes.c_double_type
            else:
                self.type = Builtin.builtin_types[function.entry.name]
                self.result_ctype = py_object_type
            self.may_return_none = False
        elif function.is_name and function.type_entry:
            # We are calling an extension type constructor. As long as we do not
            # support __new__(), the result type is clear
            self.type = function.type_entry.type
            self.result_ctype = py_object_type
            self.may_return_none = False
        else:
            self.type = py_object_type

    def analyse_as_type_constructor(self, env):
        # If the callee names a struct/union or C++/cyp class, turn this call
        # into the corresponding construction.  Returns a truthy value on
        # success (True, or self in one C++ error path), None otherwise.
        type = self.function.analyse_as_type(env)
        if type and type.is_struct_or_union:
            args, kwds = self.explicit_args_kwds()
            items = []
            for arg, member in zip(args, type.scope.var_entries):
                items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
            if kwds:
                items += kwds.key_value_pairs
            self.key_value_pairs = items
            self.__class__ = DictNode
            self.analyse_types(env)  # FIXME
            self.coerce_to(type, env)
            return True
        elif type and type.is_cpp_class:
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            # NOTE(review): empty-name lookups below — possibly stripped
            # "<...>" special names in this copy; verify against upstream.
            constructor = type.scope.lookup("")
            constructor_type = None
            constructor_cname = None
            if type.is_cyp_class:
                constructor = wrapper = type.scope.lookup_here("")
                if not wrapper:
                    # NOTE(review): after reporting this error, the next line
                    # dereferences a falsy 'wrapper' — likely needs an early
                    # return here; confirm against upstream.
                    error(self.function.pos, "no constructor wrapper found for Cypclass type '%s'" % self.function.name)
                constructor_cname = wrapper.func_cname
                constructor_type = wrapper.type
            elif not constructor:
                error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
                self.type = error_type
                return self
            else:
                constructor_type = constructor.type
                constructor_cname = type.empty_declaration_code()
            self.function = RawCNameExprNode(self.function.pos, constructor_type)
            self.function.entry = constructor
            self.function.set_cname(constructor_cname)
            self.analyse_c_function_call(env)
            if type.is_cyp_class:
                self.type = constructor_type.return_type
            else:
                self.type = type
            return True

    def is_lvalue(self):
        return self.type.is_reference

    def nogil_check(self, env):
        # A call in nogil context must be to a nogil, non-Python function.
        func_type = self.function_type()
        if func_type.is_pyobject:
            self.gil_error()
        elif not func_type.is_error and not getattr(func_type, 'nogil', False):
            self.gil_error()

    gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
#
# analysed bool used internally
# overflowcheck bool used internally
# explicit_cpp_self bool used internally
# needs_deref bool used internally
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
wrapper_call = False
has_optional_args = False
nogil = False
analysed = False
overflowcheck = False
explicit_cpp_self = None
needs_deref = False
def compile_time_value(self, denv):
    """Evaluate this call at compile time (used in DEF/IF expressions)."""
    func = self.function.compile_time_value(denv)
    arg_values = [node.compile_time_value(denv) for node in self.args]
    try:
        return func(*arg_values)
    except Exception as exc:
        self.compile_time_value_error(exc)
@classmethod
def for_cproperty(cls, pos, obj, entry):
    """Create the getter call node for a C property access on 'obj'."""
    scope = entry.scope
    getter = scope.lookup_here(entry.name)
    assert getter, "Getter not found in scope %s: %s" % (scope, scope.entries)
    func = NameNode(pos, name=entry.name, entry=getter, type=getter.type)
    return cls(pos, function=func, args=[obj])
def analyse_as_type(self, env):
    """Try to interpret this call as a type, e.g. cython.pointer(T) or
    cython.typeof(expr).

    Returns the resulting type, or None if the call does not denote a type.
    """
    attr = self.function.as_cython_attribute()
    if attr == 'pointer':
        if len(self.args) != 1:
            # BUG FIX: self.args is a plain list and has no '.pos' attribute,
            # so the original error(self.args.pos, ...) raised AttributeError
            # on this error path; report at the call position instead.
            error(self.pos, "only one type allowed.")
        else:
            arg_type = self.args[0].analyse_as_type(env)
            if not arg_type:
                error(self.args[0].pos, "Unknown type")
            else:
                return PyrexTypes.CPtrType(arg_type)
    elif attr == 'typeof':
        if len(self.args) != 1:
            error(self.pos, "only one type allowed.")
            if not self.args:
                # BUG FIX: avoid IndexError below for "cython.typeof()".
                return None
        operand = self.args[0].analyse_types(env)
        return operand.type
def explicit_args_kwds(self):
    """Return (positional_args, keyword_args); a simple call never has keywords."""
    return self.args, None
def analyse_types(self, env):
    # Analyse the call; may return a replacement node (NumPy/Pythran call)
    # or turn the node into a type construction.
    self.is_in_cpp = env.is_cpp()
    if self.analyse_as_type_constructor(env):
        return self
    if self.analysed:
        # Guard against re-analysis.
        return self
    self.analysed = True
    self.function.is_called = 1
    self.function = self.function.analyse_types(env)
    function = self.function

    if isinstance(function, NewExprNode) and len(self.args) and function.class_type.is_cyp_class:
        error(self.pos, "Cypclasses must be constructed without arguments when using new")

    if function.is_attribute and function.entry and function.entry.is_cmethod:
        # Take ownership of the object from which the attribute
        # was obtained, because we need to pass it as 'self'.
        self.self = function.obj
        function.obj = CloneNode(self.self)

    func_type = self.function_type()
    self.is_numpy_call_with_exprs = False
    if (has_np_pythran(env) and function.is_numpy_attribute and
            pythran_is_numpy_func_supported(function)):
        # A supported NumPy call under Pythran: check that every argument
        # can be handled by Pythran.
        has_pythran_args = True
        self.arg_tuple = TupleNode(self.pos, args = self.args)
        self.arg_tuple = self.arg_tuple.analyse_types(env)
        for arg in self.arg_tuple.args:
            has_pythran_args &= is_pythran_supported_node_or_none(arg)
        self.is_numpy_call_with_exprs = bool(has_pythran_args)
    if self.is_numpy_call_with_exprs:
        # Replace this node by a dedicated Pythran call node.
        env.add_include_file(pythran_get_func_include_file(function))
        return NumPyMethodCallNode.from_node(
            self,
            function_cname=pythran_functor(function),
            arg_tuple=self.arg_tuple,
            type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)),
        )
    elif func_type.is_pyobject:
        # Generic Python call: pack the arguments into a tuple.
        self.arg_tuple = TupleNode(self.pos, args = self.args)
        self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
        self.args = None
        self.set_py_result_type(function, func_type)
        self.is_temp = 1
    else:
        self.args = [ arg.analyse_types(env) for arg in self.args ]
        self.analyse_c_function_call(env)
        if func_type.exception_check in ('+', '~') or self.type.is_cyp_class:
            self.is_temp = True
    return self
def function_type(self):
    """Return the type of the callee, dereferencing a function pointer
    to the underlying function type if necessary.  For fused functions
    this is the already-specialised type."""
    ftype = self.function.type
    return ftype.base_type if ftype.is_ptr else ftype
def analyse_c_function_call(self, env):
    """Type-check a direct C function call.

    Performs overload resolution (best_match), injects the bound 'self'
    argument for C methods, coerces all arguments to the formal
    parameter types, forces temps where evaluation order would
    otherwise be undefined, and computes the call's result type and
    temp/exception-handling flags.
    """
    func_type = self.function.type
    if func_type is error_type:
        self.type = error_type
        return

    # --- Build the effective positional argument list --------------------
    if func_type.is_cfunction and func_type.is_static_method:
        if self.self and self.self.type.is_extension_type:
            # To support this we'd need to pass self to determine whether
            # it was overloaded in Python space (possibly via a Cython
            # superclass turning a cdef method into a cpdef one).
            error(self.pos, "Cannot call a static method on an instance variable.")
        args = self.args
    elif self.self:
        # Bound C method: prepend the owning object as the first argument.
        args = [self.self] + self.args
    elif (func_type.is_cfunction and len(self.args) > 0
            and not func_type.is_static_method
            and self.function.is_name and self.function.entry
            and hasattr(self.function.entry, 'scope')
            and self.function.entry.scope.is_cpp_class_scope
            and self.function.entry.scope.type.is_cyp_class):
        # cypclass extension: a cypclass method called by plain name with
        # an explicit first argument — that argument becomes the C++
        # receiver (see c_call_code()).
        self.explicit_cpp_self = self.args[0].result()
        args = self.args[1:]
    else:
        args = self.args

    # --- Find the candidate entry for overload resolution ----------------
    if func_type.is_cpp_class:
        # operator() cannot be a non-member function
        overloaded_entry = self.function.type.scope.lookup_here("operator()")
        if overloaded_entry is None:
            self.type = PyrexTypes.error_type
            self.result_code = ""
            error(self.pos, "Object of type '%s' is not callable." % self.function.type)
            return
        self.function = CoerceFromCallable(self.function)
    elif hasattr(self.function, 'entry'):
        overloaded_entry = self.function.entry
    elif self.function.is_subscript and self.function.is_fused_index:
        overloaded_entry = self.function.type.entry
    else:
        overloaded_entry = None

    if overloaded_entry:
        if self.function.type.is_fused:
            functypes = self.function.type.get_all_specialized_function_types()
            alternatives = [f.entry for f in functypes]
        else:
            alternatives = overloaded_entry.all_alternatives()

        entry = PyrexTypes.best_match(
            [arg.type for arg in args], alternatives, self.pos, env, args)

        if not entry:
            # best_match() has already reported the error.
            self.type = PyrexTypes.error_type
            self.result_code = ""
            return

        entry.used = True
        if not func_type.is_cpp_class:
            self.function.entry = entry
        elif func_type.is_cyp_class:
            # Calling through a cypclass operator(): the generated code
            # must dereference the wrapped callable (see c_call_code()).
            self.needs_deref = True
        self.function.type = entry.type
        func_type = self.function_type()
    else:
        entry = None
        func_type = self.function_type()
        if not func_type.is_cfunction:
            error(self.pos, "Calling non-function type '%s'" % func_type)
            self.type = PyrexTypes.error_type
            self.result_code = ""
            return

    # Check no. of args
    max_nargs = len(func_type.args)
    expected_nargs = max_nargs - func_type.optional_arg_count
    actual_nargs = len(args)
    if func_type.optional_arg_count and expected_nargs != actual_nargs:
        self.has_optional_args = 1
        self.is_temp = 1

    # check 'self' argument
    if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
        formal_arg = func_type.args[0]
        arg = args[0]
        if formal_arg.not_none:
            if self.self:
                # Truncate long method names in the error message ('%.30s').
                self.self = self.self.as_none_safe_node(
                    "'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''),
                    error='PyExc_AttributeError',
                    format_args=[entry.name])
            else:
                # unbound method
                arg = arg.as_none_safe_node(
                    "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                    format_args=[entry.name, formal_arg.type.name])
        if self.self:
            if formal_arg.accept_builtin_subtypes:
                arg = CMethodSelfCloneNode(self.self)
            else:
                arg = CloneNode(self.self)
            arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
        elif formal_arg.type.is_builtin_type:
            # special case: unbound methods of builtins accept subtypes
            arg = arg.coerce_to(formal_arg.type, env)
            if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
                arg.exact_builtin_type = False
        args[0] = arg

    # Coerce arguments
    some_args_in_temps = False
    for i in range(min(max_nargs, actual_nargs)):
        formal_arg = func_type.args[i]
        formal_type = formal_arg.type
        arg = args[i].coerce_to(formal_type, env)
        if formal_arg.not_none:
            # C methods must do the None checks at *call* time
            arg = arg.as_none_safe_node(
                "cannot pass None into a C function argument that is declared 'not None'")
        if arg.is_temp:
            if i > 0:
                # first argument in temp doesn't impact subsequent arguments
                some_args_in_temps = True
        elif arg.type.is_pyobject and not env.nogil:
            if i == 0 and self.self is not None:
                # a method's cloned "self" argument is ok
                pass
            elif arg.nonlocally_immutable():
                # plain local variables are ok
                pass
            else:
                # we do not safely own the argument's reference,
                # but we must make sure it cannot be collected
                # before we return from the function, so we create
                # an owned temp reference to it
                if i > 0:  # first argument doesn't matter
                    some_args_in_temps = True
                arg = arg.coerce_to_temp(env)
        elif arg.type.is_cyp_class:
            # cypclass arguments are reference-counted like Python
            # objects and need an owned temp as well.
            if i > 0:
                some_args_in_temps = True
            arg = arg.coerce_to_temp(env)
        args[i] = arg

    # handle additional varargs parameters
    for i in range(max_nargs, actual_nargs):
        arg = args[i]
        if arg.type.is_pyobject:
            if arg.type is str_type:
                arg_ctype = PyrexTypes.c_char_ptr_type
            else:
                arg_ctype = arg.type.default_coerced_ctype()
            if arg_ctype is None:
                error(self.args[i].pos,
                      "Python object cannot be passed as a varargs parameter")
            else:
                args[i] = arg = arg.coerce_to(arg_ctype, env)
        if arg.is_temp and i > 0:
            some_args_in_temps = True

    if some_args_in_temps:
        # if some args are temps and others are not, they may get
        # constructed in the wrong order (temps first) => make
        # sure they are either all temps or all not temps (except
        # for the last argument, which is evaluated last in any
        # case)
        for i in range(actual_nargs-1):
            if i == 0 and self.self is not None:
                continue  # self is ok
            arg = args[i]
            if arg.nonlocally_immutable():
                # locals, C functions, unassignable types are safe.
                pass
            elif arg.type.is_cpp_class:
                # Assignment has side effects, avoid.
                pass
            elif env.nogil and arg.type.is_pyobject:
                # can't copy a Python reference into a temp in nogil
                # env (this is safe: a construction would fail in
                # nogil anyway)
                pass
            else:
                #self.args[i] = arg.coerce_to_temp(env)
                # instead: issue a warning
                if i > 0 or i == 1 and self.self is not None:  # skip first arg
                    warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                    break

    # Write the (possibly shifted/extended) argument list back in place.
    self.args[:] = args

    # Calc result type and code fragment
    if isinstance(self.function, NewExprNode):
        if self.function.class_type.is_cyp_class:
            # 'new' on a cypclass yields the class type itself, not a pointer.
            self.type = self.function.class_type
        else:
            self.type = PyrexTypes.CPtrType(self.function.class_type)
    else:
        self.type = func_type.return_type.as_returned_type()

    if self.function.is_name or self.function.is_attribute:
        func_entry = self.function.entry
        if func_entry and (func_entry.utility_code or func_entry.utility_code_definition):
            self.is_temp = 1  # currently doesn't work for self.calculate_result_code()

    if self.type.is_pyobject:
        self.result_ctype = py_object_type
        self.is_temp = 1
    elif func_type.exception_value is not None or func_type.exception_check:
        self.is_temp = 1
    elif self.type.is_memoryviewslice:
        self.is_temp = 1
        # func_type.exception_check = True
    elif self.type.is_cyp_class:
        self.is_temp = 1

    if self.is_temp and self.type.is_reference:
        # A C++ reference cannot live in a temp; fake it with a pointer-like type.
        self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)

    # Called in 'nogil' context?
    self.nogil = env.nogil
    if (self.nogil and
            func_type.exception_check and
            func_type.exception_check not in ('+', '~')):
        env.use_utility_code(pyerr_occurred_withgil_utility_code)
    # C++ exception handler
    if func_type.exception_check == '+':
        if func_type.exception_value is None:
            env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))

    self.overflowcheck = env.directives['overflowcheck']
def calculate_result_code(self):
    # A non-temp C call is represented directly by its call expression.
    call_expression = self.c_call_code()
    return call_expression
def c_call_code(self):
    """Build the C expression string for this C function call.

    Assembles the argument list (including the overridable-method flag,
    the optional-argument struct pointer and varargs), then formats the
    call for the receiver form in use: explicit cypclass receiver,
    dereferenced callable, or plain call.
    """
    func_type = self.function_type()
    if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
        return ""

    formal_args = func_type.args
    arg_list_code = []
    args = list(zip(formal_args, self.args))
    max_nargs = len(func_type.args)
    expected_nargs = max_nargs - func_type.optional_arg_count
    actual_nargs = len(self.args)
    # cypclass 'new': the constructor call takes no C-level arguments.
    if self.type.is_cyp_class and isinstance(self.function, NewExprNode):
        return "%s()" % self.function.result()
    for formal_arg, actual_arg in args[:expected_nargs]:
        arg_code = actual_arg.move_result_rhs_as(formal_arg.type)
        arg_list_code.append(arg_code)

    if func_type.is_overridable:
        # Extra int flag tells the cpdef dispatcher whether to skip the
        # Python-level override lookup.
        arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))

    if func_type.optional_arg_count:
        if expected_nargs == actual_nargs:
            # Cast NULL to optional struct type to avoid ambiguous calls in C++
            # is_in_cpp is set in above analyse_types, called earlier in the compilation process
            if self.is_in_cpp:
                optional_args = '(%s *)NULL' % func_type.op_arg_struct.base_type.cname
            else:
                optional_args = 'NULL'
        else:
            optional_args = "&%s" % self.opt_arg_struct
        arg_list_code.append(optional_args)

    for actual_arg in self.args[len(formal_args):]:
        arg_list_code.append(actual_arg.move_result_rhs())

    result = None
    # Checking for self argument in cppclasses given as an explicit first
    # argument. This is the opposite of self.self purpose.
    if self.explicit_cpp_self:
        result = "%s->%s(%s)" % (self.explicit_cpp_self, self.function.result(), ', '.join(arg_list_code))
    elif self.needs_deref:
        result = "(*%s)(%s)" % (self.function.result(), ', '.join(arg_list_code))
    else:
        result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
    return result
def is_c_result_required(self):
    """Decide whether the C return value must be stored in a temp.

    '~' exception checks always inspect the result.  Otherwise the temp
    is only needed when there is an error return value that is checked
    in C (i.e. not via the C++ '+' exception mechanism).
    """
    func_type = self.function_type()
    if func_type.exception_check == '~':
        return True
    if func_type.exception_value and func_type.exception_check != '+':
        return True
    # No error value to compare against => skip the unused result temp.
    return False
def generate_evaluation_code(self, code):
    """Emit C code evaluating this call.

    Adds an overflow guard for abs() on signed integers, then either
    falls back to the generic tuple-based call path or emits the
    optimised 0-arg / 1-arg Python call helpers.
    """
    function = self.function
    if function.is_name or function.is_attribute:
        code.globalstate.use_entry_utility_code(function.entry)

    abs_function_cnames = ('abs', 'labs', '__Pyx_abs_longlong')
    is_signed_int = self.type.is_int and self.type.signed
    if self.overflowcheck and is_signed_int and function.result() in abs_function_cnames:
        # abs(INT_MIN) is undefined for two's-complement ints; trap it.
        code.globalstate.use_utility_code(UtilityCode.load_cached("Common", "Overflow.c"))
        code.putln('if (unlikely(%s == __PYX_MIN(%s))) {\
            PyErr_SetString(PyExc_OverflowError,\
                            "Trying to take the absolute value of the most negative integer is not defined."); %s; }' % (
            self.args[0].result(),
            self.args[0].type.empty_declaration_code(),
            code.error_goto(self.pos)))

    if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
            self.arg_tuple.args and self.arg_tuple.is_literal):
        # Generic path: C calls and multi-arg / literal-tuple Python calls.
        super(SimpleCallNode, self).generate_evaluation_code(code)
        return

    # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
    arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
    subexprs = (self.self, self.coerced_self, function, arg)
    for subexpr in subexprs:
        if subexpr is not None:
            subexpr.generate_evaluation_code(code)

    code.mark_pos(self.pos)
    assert self.is_temp
    self.allocate_temp_result(code)

    if arg is None:
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCallNoArg", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
                self.result(),
                function.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
    else:
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCallOneArg", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
                self.result(),
                function.py_result(),
                arg.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))

    self.generate_gotref(code)

    # Dispose of subexpressions only after the call has been emitted.
    for subexpr in subexprs:
        if subexpr is not None:
            subexpr.generate_disposal_code(code)
            subexpr.free_temps(code)
def generate_result_code(self, code):
    """Emit the C code performing the call and its error checking.

    Python callees go through __Pyx_PyObject_Call; C callees get their
    optional-argument struct filled in, the call emitted, and the
    appropriate exception checks appended (error return value,
    PyErr_Occurred, '~' result condition, or C++ exception translation).
    """
    func_type = self.function_type()
    if func_type.is_pyobject:
        arg_code = self.arg_tuple.py_result()
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
                self.result(),
                self.function.py_result(),
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
    elif func_type.is_cfunction:
        if self.has_optional_args:
            # Populate the optional-argument struct: first the count of
            # supplied optional args, then each value in order.
            actual_nargs = len(self.args)
            expected_nargs = len(func_type.args) - func_type.optional_arg_count
            self.opt_arg_struct = code.funcstate.allocate_temp(
                func_type.op_arg_struct.base_type, manage_ref=True)
            code.putln("%s.%s = %s;" % (
                self.opt_arg_struct,
                Naming.pyrex_prefix + "n",
                len(self.args) - expected_nargs))
            args = list(zip(func_type.args, self.args))
            for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
                code.putln("%s.%s = %s;" % (
                    self.opt_arg_struct,
                    func_type.opt_arg_cname(formal_arg.name),
                    actual_arg.result_as(formal_arg.type)))
        exc_checks = []
        if self.type.is_pyobject and self.is_temp:
            exc_checks.append("!%s" % self.result())
        elif self.type.is_memoryviewslice:
            assert self.is_temp
            exc_checks.append(self.type.error_condition(self.result()))
        elif func_type.exception_check != '+':
            exc_val = func_type.exception_value
            exc_check = func_type.exception_check
            if exc_val is not None:
                exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val)))
            if exc_check:
                if exc_check == '~':
                    assert self.is_temp
                    exc_checks.append(self.type.error_condition(self.result()))
                else:
                    if self.nogil:
                        exc_checks.append("__Pyx_ErrOccurredWithGIL()")
                    else:
                        exc_checks.append("PyErr_Occurred()")
        if self.is_temp or exc_checks:
            rhs = self.c_call_code()
            if self.result():
                lhs = "%s = " % self.result()
                if self.is_temp and self.type.is_pyobject:
                    #return_type = self.type # func_type.return_type
                    #print "SimpleCallNode.generate_result_code: casting", rhs, \
                    #    "from", return_type, "to pyobject" ###
                    rhs = typecast(py_object_type, self.type, rhs)
            else:
                lhs = ""
            if func_type.exception_check == '+':
                # Wrap the call in a try/catch translating C++ exceptions.
                translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs),
                    self.result() if self.type.is_pyobject else None,
                    func_type.exception_value, self.nogil)
            else:
                if exc_checks:
                    goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
                else:
                    goto_error = ""
                code.putln("%s%s; %s" % (lhs, rhs, goto_error))
            if self.type.is_pyobject and self.result():
                self.generate_gotref(code)
            elif self.type.is_cyp_class and self.result():
                # cypclass results are reference-counted like Python objects.
                self.generate_gotref(code)
        if self.has_optional_args:
            code.funcstate.release_temp(self.opt_arg_struct)
class NumPyMethodCallNode(ExprNode):
    """Pythran call to a NumPy function or method.

    function_cname  string     the function/method to call
    arg_tuple       TupleNode  the arguments as an args tuple
    """

    subexprs = ['arg_tuple']
    is_temp = True
    may_return_none = True

    def generate_evaluation_code(self, code):
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        assert self.arg_tuple.mult_factor is None
        arguments = self.arg_tuple.args
        for argument in arguments:
            argument.generate_evaluation_code(code)

        # Destroy the previous contents of the temp, then placement-new
        # the Pythran functor call result into it.
        result_name = self.result()
        arg_results = ", ".join(a.pythran_result() for a in arguments)
        code.putln("// function evaluation code for numpy function")
        code.putln("__Pyx_call_destructor(%s);" % result_name)
        code.putln("new (&%s) decltype(%s){%s{}(%s)};" % (
            result_name,
            result_name,
            self.function_cname,
            arg_results))
class PyMethodCallNode(SimpleCallNode):
    # Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
    # Allows the self argument to be injected directly instead of repacking a tuple for it.
    #
    # function    ExprNode      the function/method object to call
    # arg_tuple   TupleNode     the arguments for the args tuple

    subexprs = ['function', 'arg_tuple']
    is_temp = True

    def generate_evaluation_code(self, code):
        """Emit a fastcall-style call that unpacks a bound method at
        runtime, passing its __self__ as the first vectorcall argument
        instead of building an argument tuple."""
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        self.function.generate_evaluation_code(code)
        assert self.arg_tuple.mult_factor is None
        args = self.arg_tuple.args
        for arg in args:
            arg.generate_evaluation_code(code)

        # make sure function is in temp so that we can replace the reference below if it's a method
        reuse_function_temp = self.function.is_temp
        if reuse_function_temp:
            function = self.function.result()
        else:
            function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            self.function.make_owned_reference(code)
            code.put("%s = %s; " % (function, self.function.py_result()))
            self.function.generate_disposal_code(code)
            self.function.free_temps(code)

        self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln("%s = NULL;" % self_arg)
        arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
        code.putln("%s = 0;" % arg_offset_cname)

        def attribute_is_likely_method(attr):
            # Heuristic only: guides the C-level likely()/unlikely() hint.
            obj = attr.obj
            if obj.is_name and obj.entry.is_pyglobal:
                return False  # more likely to be a function
            return True

        if self.function.is_attribute:
            likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
        elif self.function.is_name and self.function.cf_state:
            # not an attribute itself, but might have been assigned from one (e.g. bound method)
            for assignment in self.function.cf_state:
                value = assignment.rhs
                if value and value.is_attribute and value.obj.type.is_pyobject:
                    if attribute_is_likely_method(value):
                        likely_method = 'likely'
                        break
            else:
                likely_method = 'unlikely'
        else:
            likely_method = 'unlikely'

        code.putln("if (CYTHON_UNPACK_METHODS && %s(PyMethod_Check(%s))) {" % (likely_method, function))
        code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
        # the following is always true in Py3 (kept only for safety),
        # but is false for unbound methods in Py2
        code.putln("if (likely(%s)) {" % self_arg)
        code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
        code.put_incref(self_arg, py_object_type)
        code.put_incref("function", py_object_type)
        # free method object as early to possible to enable reuse from CPython's freelist
        code.put_decref_set(function, py_object_type, "function")
        code.putln("%s = 1;" % arg_offset_cname)
        code.putln("}")
        code.putln("}")

        # actually call the function
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectFastCall", "ObjectHandling.c"))

        code.putln("{")
        # Slot 0 holds the (possible) self argument; the offset temp
        # selects whether it is included in the call.
        code.putln("PyObject *__pyx_callargs[%d] = {%s, %s};" % (
            len(args)+1,
            self_arg,
            ', '.join(arg.py_result() for arg in args)))
        code.putln("%s = __Pyx_PyObject_FastCall(%s, __pyx_callargs+1-%s, %d+%s);" % (
            self.result(),
            function,
            arg_offset_cname,
            len(args),
            arg_offset_cname))

        code.put_xdecref_clear(self_arg, py_object_type)
        code.funcstate.release_temp(self_arg)
        code.funcstate.release_temp(arg_offset_cname)
        for arg in args:
            arg.generate_disposal_code(code)
            arg.free_temps(code)
        code.putln(code.error_goto_if_null(self.result(), self.pos))
        self.generate_gotref(code)

        if reuse_function_temp:
            self.function.generate_disposal_code(code)
            self.function.free_temps(code)
        else:
            code.put_decref_clear(function, py_object_type)
            code.funcstate.release_temp(function)
        code.putln("}")
class InlinedDefNodeCallNode(CallNode):
    #  Inline call to defnode
    #
    #  function       PyCFunctionNode
    #  function_name  NameNode
    #  args           [ExprNode]

    subexprs = ['args', 'function_name']
    is_temp = 1
    type = py_object_type
    function = None
    function_name = None

    def can_be_inlined(self):
        """Only simple signatures qualify: no *args/**kwargs, no
        keyword-only arguments, and an exact positional match."""
        func_type= self.function.def_node
        if func_type.star_arg or func_type.starstar_arg:
            return False
        if len(func_type.args) != len(self.args):
            return False
        if func_type.num_kwonly_args:
            return False  # actually wrong number of arguments
        return True

    def analyse_types(self, env):
        """Coerce each argument to the def node's declared parameter
        type and force temps where evaluation order could otherwise
        differ from the source order."""
        self.function_name = self.function_name.analyse_types(env)

        self.args = [ arg.analyse_types(env) for arg in self.args ]
        func_type = self.function.def_node
        actual_nargs = len(self.args)

        # Coerce arguments
        some_args_in_temps = False
        for i in range(actual_nargs):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0:  # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            elif arg.type.is_cyp_class:
                # cypclass arguments need an owned temp reference too.
                if i > 0:  # first argument doesn't matter
                    some_args_in_temps = True
                arg = arg.coerce_to_temp(env)
            self.args[i] = arg

        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in range(actual_nargs-1):
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0:
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        return self

    def generate_result_code(self, code):
        # Call the generated C implementation of the def node directly,
        # passing the function object first, then the coerced arguments.
        arg_code = [self.function_name.py_result()]
        func_type = self.function.def_node
        for arg, proto_arg in zip(self.args, func_type.args):
            if arg.type.is_pyobject:
                arg_code.append(arg.result_as(proto_arg.type))
            else:
                arg_code.append(arg.result())
        arg_code = ', '.join(arg_code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                self.function.def_node.entry.pyfunc_cname,
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class PythonCapiFunctionNode(ExprNode):
    """Reference to a C-API (or utility-code) function by C name, used
    as the callee of a PythonCapiCallNode."""

    subexprs = []

    def __init__(self, pos, py_name, cname, func_type, utility_code = None):
        ExprNode.__init__(
            self, pos,
            name=py_name,
            cname=cname,
            type=func_type,
            utility_code=utility_code)

    def analyse_types(self, env):
        # Fully typed at construction time - nothing left to analyse.
        return self

    def generate_result_code(self, code):
        # Pull in the utility code that defines the function, if any.
        utility_code = self.utility_code
        if utility_code:
            code.globalstate.use_utility_code(utility_code)

    def calculate_result_code(self):
        # The result expression is simply the function's C name.
        return self.cname
class PythonCapiCallNode(SimpleCallNode):
    # Python C-API Function call (only created in transforms)

    # By default, we assume that the call never returns None, as this
    # is true for most C-API functions in CPython. If this does not
    # apply to a call, set the following to True (or None to inherit
    # the default behaviour).
    may_return_none = False

    def __init__(self, pos, function_name, func_type,
                 utility_code = None, py_name=None, **kwargs):
        return_type = func_type.return_type.as_returned_type()
        self.type = return_type
        self.result_ctype = return_type
        self.function = PythonCapiFunctionNode(
            pos, py_name, function_name, func_type,
            utility_code=utility_code)
        # call this last so that we can override the constructed
        # attributes above with explicit keyword arguments if required
        SimpleCallNode.__init__(self, pos, **kwargs)
class CachedBuiltinMethodCallNode(CallNode):
    # Python call to a method of a known Python builtin (only created in transforms)

    subexprs = ['obj', 'args']
    is_temp = True

    def __init__(self, call_node, obj, method_name, args):
        super(CachedBuiltinMethodCallNode, self).__init__(
            call_node.pos,
            obj=obj, method_name=method_name, args=args,
            may_return_none=call_node.may_return_none,
            type=call_node.type)

    def may_be_none(self):
        # Honour an explicit may_return_none setting when given,
        # otherwise fall back to the generic ExprNode behaviour.
        may_return_none = self.may_return_none
        if may_return_none is None:
            return ExprNode.may_be_none(self)
        return may_return_none

    def generate_result_code(self, code):
        # Look up (and cache) the unbound builtin method once per module,
        # then emit the call through that cache.
        call_code = code.globalstate.cached_unbound_method_call_code(
            self.obj.py_result(),
            self.obj.type.cname,
            self.method_name,
            [arg.py_result() for arg in self.args])
        code.putln("%s = %s; %s" % (
            self.result(), call_code,
            code.error_goto_if_null(self.result(), self.pos)
        ))
        self.generate_gotref(code)
class GeneralCallNode(CallNode):
    #  General Python function call, including keyword,
    #  * and ** arguments.
    #
    #  function         ExprNode
    #  positional_args  ExprNode          Tuple of positional arguments
    #  keyword_args     ExprNode or None  Dict of keyword arguments

    type = py_object_type

    subexprs = ['function', 'positional_args', 'keyword_args']

    nogil_check = Node.gil_error

    def compile_time_value(self, denv):
        """Evaluate the call at compile time (DEF/IF context)."""
        function = self.function.compile_time_value(denv)
        positional_args = self.positional_args.compile_time_value(denv)
        keyword_args = self.keyword_args.compile_time_value(denv)
        try:
            return function(*positional_args, **keyword_args)
        except Exception as e:
            self.compile_time_value_error(e)

    def explicit_args_kwds(self):
        # Only literal tuples/dicts can be split into explicit args/kwds.
        if (self.keyword_args and not self.keyword_args.is_dict_literal or
                not self.positional_args.is_sequence_constructor):
            raise CompileError(self.pos,
                'Compile-time keyword arguments must be explicit.')
        return self.positional_args.args, self.keyword_args

    def analyse_types(self, env):
        """Analyse the call; C callees may be rewritten into a
        SimpleCallNode via map_to_simple_call_node(), otherwise the call
        goes through the generic Python protocol."""
        if self.analyse_as_type_constructor(env):
            return self
        self.function = self.function.analyse_types(env)
        if not self.function.type.is_pyobject:
            if self.function.type.is_error:
                self.type = error_type
                return self
            if hasattr(self.function, 'entry'):
                node = self.map_to_simple_call_node()
                if node is not None and node is not self:
                    return node.analyse_types(env)
                elif self.function.entry.as_variable:
                    # Fall back to calling the Python wrapper object.
                    self.function = self.function.coerce_to_pyobject(env)
                elif node is self:
                    error(self.pos,
                          "Non-trivial keyword arguments and starred "
                          "arguments not allowed in cdef functions.")
                else:
                    # error was already reported
                    pass
            else:
                self.function = self.function.coerce_to_pyobject(env)
        if self.keyword_args:
            self.keyword_args = self.keyword_args.analyse_types(env)
        self.positional_args = self.positional_args.analyse_types(env)
        self.positional_args = \
            self.positional_args.coerce_to_pyobject(env)
        self.set_py_result_type(self.function)
        self.is_temp = 1
        return self

    def map_to_simple_call_node(self):
        """
        Tries to map keyword arguments to declared positional arguments.
        Returns self to try a Python call, None to report an error
        or a SimpleCallNode if the mapping succeeds.
        """
        if not isinstance(self.positional_args, TupleNode):
            # has starred argument
            return self
        if not self.keyword_args.is_dict_literal:
            # keywords come from arbitrary expression => nothing to do here
            return self
        function = self.function
        entry = getattr(function, 'entry', None)
        if not entry:
            return self
        function_type = entry.type
        if function_type.is_ptr:
            function_type = function_type.base_type
        if not function_type.is_cfunction:
            return self

        pos_args = self.positional_args.args
        kwargs = self.keyword_args
        declared_args = function_type.args
        if entry.is_cmethod:
            declared_args = declared_args[1:]  # skip 'self'

        if len(pos_args) > len(declared_args):
            error(self.pos, "function call got too many positional arguments, "
                            "expected %d, got %s" % (len(declared_args),
                                                     len(pos_args)))
            return None

        # 'matched_args' tracks declared parameter names already consumed
        # by positional arguments (or, later, by keywords).
        matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
                             if arg.name ])
        unmatched_args = declared_args[len(pos_args):]
        matched_kwargs_count = 0
        args = list(pos_args)

        # check for duplicate keywords
        seen = set(matched_args)
        has_errors = False
        for arg in kwargs.key_value_pairs:
            name = arg.key.value
            if name in seen:
                error(arg.pos, "argument '%s' passed twice" % name)
                has_errors = True
                # continue to report more errors if there are any
            seen.add(name)

        # match keywords that are passed in order
        for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
            name = arg.key.value
            if decl_arg.name == name:
                matched_args.add(name)
                matched_kwargs_count += 1
                args.append(arg.value)
            else:
                break

        # match keyword arguments that are passed out-of-order, but keep
        # the evaluation of non-simple arguments in order by moving them
        # into temps
        from .UtilNodes import EvalWithTempExprNode, LetRefNode
        temps = []
        if len(kwargs.key_value_pairs) > matched_kwargs_count:
            unmatched_args = declared_args[len(args):]
            keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
                              for i, arg in enumerate(kwargs.key_value_pairs) ])
            first_missing_keyword = None
            for decl_arg in unmatched_args:
                name = decl_arg.name
                if name not in keywords:
                    # missing keyword argument => either done or error
                    if not first_missing_keyword:
                        first_missing_keyword = name
                    continue
                elif first_missing_keyword:
                    if entry.as_variable:
                        # we might be able to convert the function to a Python
                        # object, which then allows full calling semantics
                        # with default values in gaps - currently, we only
                        # support optional arguments at the end
                        return self
                    # wasn't the last keyword => gaps are not supported
                    error(self.pos, "C function call is missing "
                                    "argument '%s'" % first_missing_keyword)
                    return None
                pos, arg = keywords[name]
                matched_args.add(name)
                matched_kwargs_count += 1
                if arg.value.is_simple():
                    args.append(arg.value)
                else:
                    # Preserve source evaluation order via a temp.
                    temp = LetRefNode(arg.value)
                    assert temp.is_simple()
                    args.append(temp)
                    temps.append((pos, temp))

            if temps:
                # may have to move preceding non-simple args into temps
                final_args = []
                new_temps = []
                first_temp_arg = temps[0][-1]
                for arg_value in args:
                    if arg_value is first_temp_arg:
                        break  # done
                    if arg_value.is_simple():
                        final_args.append(arg_value)
                    else:
                        temp = LetRefNode(arg_value)
                        new_temps.append(temp)
                        final_args.append(temp)
                if new_temps:
                    args = final_args
                temps = new_temps + [ arg for i,arg in sorted(temps) ]

        # check for unexpected keywords
        for arg in kwargs.key_value_pairs:
            name = arg.key.value
            if name not in matched_args:
                has_errors = True
                error(arg.pos,
                      "C function got unexpected keyword argument '%s'" %
                      name)

        if has_errors:
            # error was reported already
            return None

        # all keywords mapped to positional arguments
        # if we are missing arguments, SimpleCallNode will figure it out
        node = SimpleCallNode(self.pos, function=function, args=args)
        # Wrap innermost-first so temps are evaluated in source order.
        for temp in temps[::-1]:
            node = EvalWithTempExprNode(temp, node)
        return node

    def generate_result_code(self, code):
        # Generic Python call: function(*tuple, **dict-or-NULL).
        if self.type.is_error: return
        if self.keyword_args:
            kwargs = self.keyword_args.py_result()
        else:
            kwargs = 'NULL'
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
                self.result(),
                self.function.py_result(),
                self.positional_args.py_result(),
                kwargs,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class AsTupleNode(ExprNode):
    """Convert an argument to a tuple.  Used for normalising the
    * argument of a function call.

    arg   ExprNode   the expression being converted
    """

    subexprs = ['arg']
    is_temp = 1

    nogil_check = Node.gil_error
    gil_message = "Constructing Python tuple"

    def calculate_constant_result(self):
        self.constant_result = tuple(self.arg.constant_result)

    def compile_time_value(self, denv):
        value = self.arg.compile_time_value(denv)
        try:
            return tuple(value)
        except Exception as e:
            self.compile_time_value_error(e)

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
        self.arg = arg
        if arg.type is tuple_type:
            # Already a tuple - only a None check is needed.
            return arg.as_none_safe_node("'NoneType' object is not iterable")
        self.type = tuple_type
        return self

    def may_be_none(self):
        # The conversion always yields a real tuple, never None.
        return False

    def generate_result_code(self, code):
        if self.arg.type in (py_object_type, tuple_type):
            cfunc = "__Pyx_PySequence_Tuple"
        else:
            cfunc = "PySequence_Tuple"
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                cfunc,
                self.arg.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class MergedDictNode(ExprNode):
    #  Helper class for keyword arguments and other merged dicts.
    #
    #  keyword_args      [DictNode or other ExprNode]

    subexprs = ['keyword_args']
    is_temp = 1
    type = dict_type
    reject_duplicates = True

    def calculate_constant_result(self):
        # Fold the merged dict at compile time when all items are constant.
        # Raises ValueError on duplicate keys if reject_duplicates is set.
        result = {}
        reject_duplicates = self.reject_duplicates
        for item in self.keyword_args:
            if item.is_dict_literal:
                # process items in order
                items = ((key.constant_result, value.constant_result)
                         for key, value in item.key_value_pairs)
            else:
                # BUG FIX: was the Python-2-only '.iteritems()'.  This code
                # runs in the compiler's own (Python 3) runtime, so it raised
                # AttributeError instead of folding the constant.
                items = item.constant_result.items()
            for key, value in items:
                if reject_duplicates and key in result:
                    raise ValueError("duplicate keyword argument found: %s" % key)
                result[key] = value
        self.constant_result = result

    def compile_time_value(self, denv):
        # Evaluate the merged dict in a compile-time (DEF) environment.
        result = {}
        reject_duplicates = self.reject_duplicates
        for item in self.keyword_args:
            if item.is_dict_literal:
                # process items in order
                items = [(key.compile_time_value(denv), value.compile_time_value(denv))
                         for key, value in item.key_value_pairs]
            else:
                # BUG FIX: was the Python-2-only '.iteritems()' (see above).
                items = item.compile_time_value(denv).items()
            try:
                for key, value in items:
                    if reject_duplicates and key in result:
                        raise ValueError("duplicate keyword argument found: %s" % key)
                    result[key] = value
            except Exception as e:
                self.compile_time_value_error(e)
        return result

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        return dict_type

    def analyse_types(self, env):
        # Each argument must be a non-None Python mapping.
        args = [
            arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
                # FIXME: CPython's error message starts with the runtime function name
                'argument after ** must be a mapping, not NoneType')
            for arg in self.keyword_args
        ]

        if len(args) == 1 and args[0].type is dict_type:
            # strip this intermediate node and use the bare dict
            arg = args[0]
            if arg.is_name and arg.entry.is_arg and len(arg.entry.cf_assignments) == 1:
                # passing **kwargs through to function call => allow NULL
                arg.allow_null = True
            return arg

        self.keyword_args = args
        return self

    def may_be_none(self):
        return False

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        # Build the result dict from the first item (copied or converted),
        # then merge the remaining items into it one by one.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        args = iter(self.keyword_args)
        item = next(args)
        item.generate_evaluation_code(code)
        if item.type is not dict_type:
            # CPython supports calling functions with non-dicts, so do we
            code.putln('if (likely(PyDict_CheckExact(%s))) {' %
                       item.py_result())

        if item.is_dict_literal:
            # a fresh literal dict can be used directly without copying
            item.make_owned_reference(code)
            code.putln("%s = %s;" % (self.result(), item.py_result()))
            item.generate_post_assignment_code(code)
        else:
            code.putln("%s = PyDict_Copy(%s); %s" % (
                self.result(),
                item.py_result(),
                code.error_goto_if_null(self.result(), item.pos)))
            self.generate_gotref(code)
            item.generate_disposal_code(code)

        if item.type is not dict_type:
            # runtime fallback: convert a non-dict mapping via dict(mapping)
            code.putln('} else {')
            code.globalstate.use_utility_code(UtilityCode.load_cached(
                "PyObjectCallOneArg", "ObjectHandling.c"))
            code.putln("%s = __Pyx_PyObject_CallOneArg((PyObject*)&PyDict_Type, %s); %s" % (
                self.result(),
                item.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)
            item.generate_disposal_code(code)
            code.putln('}')
        item.free_temps(code)

        helpers = set()
        for item in args:
            if item.is_dict_literal:
                # inline update instead of creating an intermediate dict
                for arg in item.key_value_pairs:
                    arg.generate_evaluation_code(code)
                    if self.reject_duplicates:
                        code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
                            self.result(),
                            arg.key.py_result()))
                        helpers.add("RaiseDoubleKeywords")
                        # FIXME: find out function name at runtime!
                        code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                            arg.key.py_result(),
                            code.error_goto(self.pos)))
                        code.putln("}")
                    code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % (
                        self.result(),
                        arg.key.py_result(),
                        arg.value.py_result()))
                    arg.generate_disposal_code(code)
                    arg.free_temps(code)
            else:
                item.generate_evaluation_code(code)
                if self.reject_duplicates:
                    # merge mapping into kwdict one by one as we need to check for duplicates
                    helpers.add("MergeKeywords")
                    code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % (
                        self.result(), item.py_result()))
                else:
                    # simple case, just add all entries
                    helpers.add("RaiseMappingExpected")
                    code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % (
                        self.result(), item.py_result()))
                    code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) "
                               "__Pyx_RaiseMappingExpectedError(%s);" % item.py_result())
                    code.putln(code.error_goto(item.pos))
                    code.putln("}")
                item.generate_disposal_code(code)
                item.free_temps(code)

        for helper in sorted(helpers):
            code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c"))

    def annotate(self, code):
        for item in self.keyword_args:
            item.annotate(code)
class AttributeNode(ExprNode):
    #  obj.attribute
    #
    #  obj          ExprNode
    #  attribute    string
    #  needs_none_check boolean        Used if obj is an extension type.
    #                                  If set to True, it is known that the type is not None.
    #
    #  Used internally:
    #
    #  is_py_attr           boolean   Is a Python getattr operation
    #  member               string    C name of struct member
    #  is_called            boolean   Function call is being done on result
    #  entry                Entry     Symbol table entry of attribute

    is_attribute = 1
    subexprs = ['obj']

    type = PyrexTypes.error_type
    entry = None
    is_called = 0
    needs_none_check = True
    is_memslice_transpose = False
    is_special_lookup = False
    is_py_attr = 0

    def as_cython_attribute(self):
        # Return the dotted attribute path if this refers to an attribute
        # of the 'cython' magic module (excluding 'cython.parallel'),
        # otherwise None.
        if (isinstance(self.obj, NameNode) and
                self.obj.is_cython_module and not
                self.attribute == u"parallel"):
            return self.attribute

        cy = self.obj.as_cython_attribute()
        if cy:
            return "%s.%s" % (cy, self.attribute)
        return None

    def coerce_to(self, dst_type, env):
        #  If coercing to a generic pyobject and this is a cpdef function
        #  we can create the corresponding attribute
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction and entry.as_variable:
                # must be a cpdef function
                self.is_temp = 1
                self.entry = entry.as_variable
                self.analyse_as_python_attribute(env)
                return self
        return ExprNode.coerce_to(self, dst_type, env)

    def calculate_constant_result(self):
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            # do not constant-fold special attributes
            return
        self.constant_result = getattr(self.obj.constant_result, attr)

    def compile_time_value(self, denv):
        # Evaluate the attribute access at compile time (DEF context).
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            error(self.pos,
                  "Invalid attribute name '%s' in compile-time expression" % attr)
            return None
        obj = self.obj.compile_time_value(denv)
        try:
            return getattr(obj, attr)
        except Exception as e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        # Type inference for this node depends only on the base object.
        return self.obj.type_dependencies(env)

    def infer_type(self, env):
        # FIXME: this is way too redundant with analyse_types()
        node = self.analyse_as_cimported_attribute_node(env, target=False)
        if node is not None:
            return node.entry.type
        node = self.analyse_as_type_attribute(env)
        if node is not None:
            return node.entry.type
        obj_type = self.obj.infer_type(env)
        self.analyse_attribute(env, obj_type=obj_type)
        if obj_type.is_builtin_type and self.type.is_cfunction:
            # special case: C-API replacements for C methods of
            # builtin types cannot be inferred as C functions as
            # that would prevent their use as bound methods
            return py_object_type
        elif self.entry and self.entry.is_cmethod:
            # special case: bound methods should not be inferred
            # as their unbound method types
            return py_object_type
        return self.type

    def analyse_target_declaration(self, env):
        pass

    def analyse_target_types(self, env):
        # Analyse as an assignment target and validate writability.
        node = self.analyse_types(env, target = 1)
        if node.type.is_const:
            error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
        if not node.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
        return node

    def analyse_types(self, env, target = 0):
        # Try the specialised interpretations first (cimported C name,
        # unbound type attribute); fall back to an ordinary attribute.
        self.is_target = target
        self.initialized_check = env.directives['initializedcheck']
        node = self.analyse_as_cimported_attribute_node(env, target)
        if node is None and not target:
            node = self.analyse_as_type_attribute(env)
        if node is None:
            node = self.analyse_as_ordinary_attribute_node(env, target)
            assert node is not None
        if (node.is_attribute or node.is_name) and node.entry:
            node.entry.used = True
        if node.is_attribute:
            node.wrap_obj_in_nonecheck(env)
        return node

    def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, mutates
        # this node into a NameNode and returns 1, otherwise
        # returns 0.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and (
                    entry.is_cglobal or entry.is_cfunction
                    or entry.is_type or entry.is_const):
                return self.as_name_node(env, entry, target)
            if self.is_cimported_module_without_shadow(env):
                error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
                return self
        return None

    def analyse_as_type_attribute(self, env):
        # Try to interpret this as a reference to an unbound
        # C method of an extension type or builtin type. If successful,
        # creates a corresponding NameNode and returns it, otherwise
        # returns None.
        if self.obj.is_string_literal:
            return
        type = self.obj.analyse_as_type(env)
        if type:
            if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
                entry = type.scope.lookup_here(self.attribute)
                if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
                    if type.is_builtin_type:
                        if not self.is_called:
                            # must handle this as Python object
                            return None
                        ubcm_entry = entry
                    else:
                        # Create a temporary entry describing the C method
                        # as an ordinary function.
                        if entry.func_cname and not hasattr(entry.type, 'op_arg_struct') and not type.is_cyp_class:
                            cname = entry.func_cname
                            if entry.type.is_static_method or (
                                    env.parent_scope and env.parent_scope.is_cpp_class_scope):
                                ctype = entry.type
                            elif type.is_cpp_class:
                                error(self.pos, "%s not a static member of %s" % (entry.name, type))
                                ctype = PyrexTypes.error_type
                            else:
                                # Fix self type.
                                ctype = copy.copy(entry.type)
                                ctype.args = ctype.args[:]
                                ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
                        elif type.is_cyp_class and entry.func_cname:
                            # cyp_class fork: call the method through its direct C name
                            cname = entry.func_cname
                            ctype = entry.type
                        else:
                            # call through the vtable pointer
                            cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
                            ctype = entry.type
                        ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
                        ubcm_entry.is_cfunction = 1
                        ubcm_entry.func_cname = entry.func_cname
                        ubcm_entry.is_unbound_cmethod = 1
                        ubcm_entry.scope = entry.scope
                        if type.is_cpp_class:
                            ubcm_entry.overloaded_alternatives = [
                                Symtab.Entry(entry.name, entry.func_cname, entry.type)
                                for entry in entry.overloaded_alternatives
                            ]
                    self.entry = ubcm_entry
                    return self.as_name_node(env, ubcm_entry, target=False)
            elif type.is_enum or type.is_cpp_enum:
                if self.attribute in type.values:
                    for entry in type.entry.enum_values:
                        if entry.name == self.attribute:
                            return self.as_name_node(env, entry, target=False)
                    else:
                        error(self.pos, "%s not a known value of %s" % (self.attribute, type))
                else:
                    error(self.pos, "%s not a known value of %s" % (self.attribute, type))
        return None

    def analyse_as_type(self, env):
        # Try to interpret this attribute access as naming a type
        # (e.g. 'mod.MyType' or a nested type); returns the type or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            return module_scope.lookup_type(self.attribute)
        if not self.obj.is_string_literal:
            base_type = self.obj.analyse_as_type(env)
            if base_type and getattr(base_type, 'scope', None) is not None:
                return base_type.scope.lookup_type(self.attribute)
        return None

    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type
        # in a cimported module. Returns the extension type, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.is_type:
                if entry.type.is_extension_type or entry.type.is_builtin_type:
                    return entry.type
        return None

    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module
        # in another cimported module. Returns the module scope, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.as_module:
                return entry.as_module
        return None

    def as_name_node(self, env, entry, target):
        # Create a corresponding NameNode from this node and complete the
        # analyse_types phase.
        node = NameNode.from_node(self, name=self.attribute, entry=entry)
        if target:
            node = node.analyse_target_types(env)
        else:
            node = node.analyse_rvalue_entry(env)
        node.entry.used = 1
        return node

    def analyse_as_ordinary_attribute_node(self, env, target):
        # Analyse as a plain C struct member or Python attribute access.
        self.obj = self.obj.analyse_types(env)
        self.analyse_attribute(env)
        if self.entry and self.entry.is_cmethod and not self.is_called:
#            error(self.pos, "C method can only be called")
            pass
        ## Reference to C array turns into pointer to first element.
        #while self.type.is_array:
        #    self.type = self.type.element_ptr_type()
        if self.is_py_attr:
            if not target:
                self.is_temp = 1
                self.result_ctype = py_object_type
        elif target and self.obj.type.is_builtin_type:
            error(self.pos, "Assignment to an immutable object field")
        elif self.entry and self.entry.is_cproperty:
            if not target:
                return SimpleCallNode.for_cproperty(self.pos, self.obj, self.entry).analyse_types(env)
            # TODO: implement writable C-properties?
            error(self.pos, "Assignment to a read-only property")
        #elif self.type.is_memoryviewslice and not target:
        #    self.is_temp = True
        return self

    def analyse_attribute(self, env, obj_type = None):
        # Look up attribute and set self.type and self.member.
        immutable_obj = obj_type is not None # used during type inference
        self.is_py_attr = 0
        self.member = self.attribute
        if obj_type is None:
            if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
                self.obj = self.obj.coerce_to_pyobject(env)
            obj_type = self.obj.type
        else:
            if obj_type.is_string or obj_type.is_pyunicode_ptr:
                obj_type = py_object_type
        if obj_type.is_ptr or obj_type.is_array:
            obj_type = obj_type.base_type
            self.op = "->"
        elif obj_type.is_extension_type or obj_type.is_builtin_type:
            self.op = "->"
        elif obj_type.is_reference and obj_type.is_fake_reference:
            self.op = "->"
        elif obj_type.is_cyp_class:
            # cyp_class instances are always accessed through a pointer
            self.op = "->"
        else:
            self.op = "."
        if obj_type.has_attributes:
            if obj_type.attributes_known():
                entry = obj_type.scope.lookup_here(self.attribute)
                if obj_type.is_memoryviewslice and not entry:
                    if self.attribute == 'T':
                        # memoryview transpose: 'mv.T'
                        self.is_memslice_transpose = True
                        self.is_temp = True
                        self.use_managed_ref = True
                        self.type = self.obj.type.transpose(self.pos)
                        return
                    else:
                        obj_type.declare_attribute(self.attribute, env, self.pos)
                        entry = obj_type.scope.lookup_here(self.attribute)
                if entry and entry.is_member:
                    entry = None
            else:
                error(self.pos,
                      "Cannot select attribute of incomplete type '%s'"
                      % obj_type)
                self.type = PyrexTypes.error_type
                return
            self.entry = entry
            if entry:
                if obj_type.is_extension_type and entry.name == "__weakref__":
                    error(self.pos, "Illegal use of special attribute __weakref__")

                # def methods need the normal attribute lookup
                # because they do not have struct entries
                # fused function go through assignment synthesis
                # (foo = pycfunction(foo_func_obj)) and need to go through
                # regular Python lookup as well
                if entry.is_cproperty:
                    self.type = entry.type
                    return
                elif (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
                    self.type = entry.type
                    self.member = entry.cname
                    return
                elif entry.type.is_cpp_class and entry.is_type:
                    # This could be a c++ class trying to access a parent type
                    self.type = entry.type
                    self.member = entry.cname
                    return
                else:
                    # If it's not a variable or C method, it must be a Python
                    # method of an extension type, so we treat it like a Python
                    # attribute.
                    pass
        # If we get here, the base object is not a struct/union/extension
        # type, or it is an extension type and the attribute is either not
        # declared or is declared as a Python method. Treat it as a Python
        # attribute reference.
        self.analyse_as_python_attribute(env, obj_type, immutable_obj)

    def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
        # Fall-back analysis: treat as a runtime Python attribute lookup.
        if obj_type is None:
            obj_type = self.obj.type
        # mangle private '__*' Python attributes used inside of a class
        self.attribute = env.mangle_class_private_name(self.attribute)
        self.member = self.attribute
        self.type = py_object_type
        self.is_py_attr = 1

        if not obj_type.is_pyobject and not obj_type.is_error:
            # Expose python methods for immutable objects.
            if (obj_type.is_string or obj_type.is_cpp_string
                    or obj_type.is_buffer or obj_type.is_memoryviewslice
                    or obj_type.is_numeric
                    or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env))
                    or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))):
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
                    and self.obj.entry.as_variable
                    and self.obj.entry.as_variable.type.is_pyobject):
                # might be an optimised builtin function => unpack it
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            else:
                error(self.pos,
                      "Object of type '%s' has no attribute '%s'" %
                      (obj_type, self.attribute))

    def wrap_obj_in_nonecheck(self, env):
        # Under the 'nonecheck' directive, guard the base object against
        # None before the attribute access.
        if not env.directives['nonecheck']:
            return

        msg = None
        format_args = ()
        if (self.obj.type.is_extension_type and self.needs_none_check and not
                self.is_py_attr):
            msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '')
            format_args = (self.attribute,)
        elif self.obj.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                msg = "Cannot transpose None memoryview slice"
            else:
                entry = self.obj.type.scope.lookup_here(self.attribute)
                if entry:
                    # copy/is_c_contig/shape/strides etc
                    msg = "Cannot access '%s' attribute of None memoryview slice"
                    format_args = (entry.name,)

        if msg:
            self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
                                                  format_args=format_args)

    def nogil_check(self, env):
        if self.is_py_attr:
            self.gil_error()

    gil_message = "Accessing Python attribute"

    def is_cimported_module_without_shadow(self, env):
        return self.obj.is_cimported_module_without_shadow(env)

    def is_simple(self):
        if self.obj:
            return self.result_in_temp() or self.obj.is_simple()
        else:
            # mutated into a NameNode => delegate to NameNode behaviour
            return NameNode.is_simple(self)

    def is_lvalue(self):
        if self.obj:
            return True
        else:
            return NameNode.is_lvalue(self)

    def is_ephemeral(self):
        if self.obj:
            return self.obj.is_ephemeral()
        else:
            return NameNode.is_ephemeral(self)

    def calculate_result_code(self):
        #print "AttributeNode.calculate_result_code:", self.member ###
        #print "...obj node =", self.obj, "code", self.obj.result() ###
        #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
        obj = self.obj
        obj_code = obj.result_as(obj.type)
        #print "...obj_code =", obj_code ###
        if self.entry and self.entry.is_cmethod:
            if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
                if self.entry.final_func_cname:
                    return self.entry.final_func_cname
                if self.type.from_fused:
                    # If the attribute was specialized through indexing, make
                    # sure to get the right fused name, as our entry was
                    # replaced by our parent index node
                    # (AnalyseExpressionsTransform)
                    self.member = self.entry.cname
                return "((struct %s *)%s%s%s)->%s" % (
                    obj.type.vtabstruct_cname, obj_code, self.op,
                    obj.type.vtabslot_cname, self.member)
            elif self.result_is_used:
                return self.member
            # Generating no code at all for unused access to optimised builtin
            # methods fixes the problem that some optimisations only exist as
            # macros, i.e. there is no function pointer to them, so we would
            # generate invalid C code here.
            return
        elif obj.type.is_complex:
            return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
        else:
            if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
                # accessing a field of a builtin type, need to cast better than result_as() does
                obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
            if obj.type.is_cpp_class and self.entry and self.entry.is_cfunction:
                # the entry might have been resolved to an overloaded alternative in the meantime
                if obj.type.is_cyp_class and self.entry.type.is_static_method and self.entry.static_cname is not None:
                    self.member = self.entry.static_cname
                else:
                    self.member = self.entry.cname
            return "%s%s%s" % (obj_code, self.op, self.member)

    def generate_result_code(self, code):
        # Emit the C code that produces the attribute value (only needed
        # for Python attribute lookups and memoryview special cases; plain
        # C member access is fully handled by calculate_result_code()).
        if self.is_py_attr:
            if self.is_special_lookup:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_LookupSpecial'
            else:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_GetAttrStr'
            code.putln(
                '%s = %s(%s, %s); %s' % (
                    self.result(),
                    lookup_func_name,
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)
        elif self.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                # transpose the slice
                for access, packing in self.type.axes:
                    if access == 'ptr':
                        error(self.pos, "Transposing not supported for slices "
                                        "with indirect dimensions")
                        return

                code.putln("%s = %s;" % (self.result(), self.obj.result()))
                code.put_incref_memoryviewslice(self.result(), self.type,
                                                have_gil=True)

                T = "__pyx_memslice_transpose(&%s)" % self.result()
                code.putln(code.error_goto_if_neg(T, self.pos))
            elif self.initialized_check:
                code.putln(
                    'if (unlikely(!%s.memview)) {'
                        'PyErr_SetString(PyExc_AttributeError,'
                                        '"Memoryview is not initialized");'
                        '%s'
                    '}' % (self.result(), code.error_goto(self.pos)))
        else:
            # result_code contains what is needed, but we may need to insert
            # a check and raise an exception
            if self.obj.type and self.obj.type.is_extension_type:
                pass
            elif self.entry and self.entry.is_cmethod:
                # C method implemented as function call with utility code
                code.globalstate.use_entry_utility_code(self.entry)

    def generate_disposal_code(self, code):
        if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
            # mirror condition for putting the memview incref here:
            code.put_xdecref_clear(self.result(), self.type, have_gil=True)
        else:
            ExprNode.generate_disposal_code(self, code)

    def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                                 exception_check=None, exception_value=None):
        # Emit C code assigning 'rhs' to this attribute (Python setattr,
        # complex component, memoryview slice, or plain C member).
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    rhs.py_result()))
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
        elif self.obj.type.is_complex:
            code.putln("__Pyx_SET_C%s(%s, %s);" % (
                self.member.upper(),
                self.obj.result_as(self.obj.type),
                rhs.result_as(self.ctype())))
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
        else:
            select_code = self.result()
            if self.type.is_pyobject and self.use_managed_ref:
                rhs.make_owned_reference(code)
                rhs.generate_giveref(code)
                code.put_gotref(select_code, self.type)
                code.put_decref(select_code, self.ctype())
            elif self.type.is_memoryviewslice:
                from . import MemoryView
                MemoryView.put_assign_to_memviewslice(
                    select_code, rhs, rhs.result(), self.type, code)
            elif self.type.is_cyp_class:
                # cyp_class fork: reference-counted C++ object field
                rhs.make_owned_reference(code)
                rhs.generate_giveref(code)
                code.put_gotref(select_code, self.type)
                code.put_xdecref(select_code, self.type)
            if not self.type.is_memoryviewslice:
                code.putln(
                    "%s = %s;" % (
                        select_code,
                        rhs.move_result_rhs_as(self.ctype())))
                        #rhs.result()))
            rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)

    def generate_deletion_code(self, code, ignore_nonexisting=False):
        # Emit C code for 'del obj.attr'; only valid for Python attributes
        # and properties that define __del__.
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr or (self.entry.scope.is_property_scope
                               and u'__del__' in self.entry.scope.entries):
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute)))
        else:
            error(self.pos, "Cannot delete C attribute of extension type")
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)

    def annotate(self, code):
        if self.is_py_attr:
            style, text = 'py_attr', 'python attribute (%s)'
        else:
            style, text = 'c_attr', 'c attribute (%s)'
        code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredUnpackingNode(ExprNode):
    #  A starred expression like "*a"
    #
    #  This is only allowed in sequence assignment or construction such as
    #
    #      a, *b = (1,2,3,4)    =>     a = 1 ; b = [2,3,4]
    #
    #  and will be special cased during type analysis (or generate an error
    #  if it's found at unexpected places).
    #
    #  target          ExprNode

    subexprs = ['target']
    is_starred = 1
    type = py_object_type
    is_temp = 1
    starred_expr_allowed_here = False

    def __init__(self, pos, target):
        ExprNode.__init__(self, pos, target=target)

    def _check_allowed(self):
        # Report starred expressions that appear outside of the contexts
        # where the parent node explicitly permits them.
        if not self.starred_expr_allowed_here:
            error(self.pos, "starred expression is not allowed here")

    def analyse_declarations(self, env):
        self._check_allowed()
        self.target.analyse_declarations(env)

    def infer_type(self, env):
        return self.target.infer_type(env)

    def analyse_types(self, env):
        self._check_allowed()
        self.target = self.target.analyse_types(env)
        self.type = self.target.type
        return self

    def analyse_target_declaration(self, env):
        self.target.analyse_target_declaration(env)

    def analyse_target_types(self, env):
        self.target = self.target.analyse_target_types(env)
        self.type = self.target.type
        return self

    def calculate_result_code(self):
        # This node never produces a value of its own.
        return ""

    def generate_result_code(self, code):
        pass
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
    # Evaluate every element expression in the compile-time environment.
    return [item.compile_time_value(denv) for item in self.args]
def replace_starred_target_node(self):
    # Unwrap any starred node among the targets into its contained
    # expression, remembering that this is a starred assignment.
    # More than one starred target is an error.
    self.starred_assignment = False
    new_args = []
    for target in self.args:
        if not target.is_starred:
            new_args.append(target)
            continue
        if self.starred_assignment:
            error(target.pos, "more than 1 starred expression in assignment")
        self.starred_assignment = True
        inner = target.target
        inner.is_starred = True
        new_args.append(inner)
    self.args = new_args
def analyse_target_declaration(self, env):
    # Normalise starred targets first, then declare each target.
    self.replace_starred_target_node()
    for target in self.args:
        target.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
    # Analyse the element expressions (unless the caller already did)
    # and coerce each one to a Python object.  'mult_factor' keeps a C
    # integer type if it has one, otherwise becomes a Python object.
    for i, arg in enumerate(self.args):
        if not skip_children:
            arg = arg.analyse_types(env)
        self.args[i] = arg.coerce_to_pyobject(env)
    if self.mult_factor:
        self.mult_factor = self.mult_factor.analyse_types(env)
        if not self.mult_factor.type.is_int:
            self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
    self.is_temp = 1
    # not setting self.type here, subtypes do this
    return self
def coerce_to_ctuple(self, dst_type, env):
    # Coerce this sequence literal to a C tuple of the given type,
    # element by element; lengths must match.
    if self.type == dst_type:
        return self
    assert not self.mult_factor
    if len(self.args) != dst_type.size:
        error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % (
            dst_type.size, len(self.args)))
    coerced_args = [
        arg.coerce_to(component_type, env)
        for arg, component_type in zip(self.args, dst_type.components)
    ]
    return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True)
def _create_merge_node_if_necessary(self, env):
    # If any starred arguments remain after flattening, convert this
    # node into a MergedSequenceNode built from partial tuples and the
    # starred iterables; otherwise return self unchanged.
    self._flatten_starred_args()
    if not any(arg.is_starred for arg in self.args):
        return self
    # convert into MergedSequenceNode by building partial sequences
    args = []
    values = []
    for arg in self.args:
        if arg.is_starred:
            if values:
                # flush the pending plain values as one tuple
                args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
                values = []
            args.append(arg.target)
        else:
            values.append(arg)
    if values:
        args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
    node = MergedSequenceNode(self.pos, args, self.type)
    if self.mult_factor:
        # apply the multiplier to the merged result as an in-place binop
        node = binop_node(
            self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env),
            inplace=True, type=self.type, is_temp=True)
    return node
def _flatten_starred_args(self):
    # Inline the elements of starred sequence literals (without a
    # multiplier) directly into this node's argument list.
    flattened = []
    for arg in self.args:
        target = arg.target if arg.is_starred else None
        if target is not None and target.is_sequence_constructor and not target.mult_factor:
            flattened.extend(target.args)
        else:
            flattened.append(arg)
    self.args[:] = flattened
def may_be_none(self):
    # a sequence constructor always produces a new, non-None object
    return False
def analyse_target_types(self, env):
    # Analyse each unpacking target and create a temp (plus an optional
    # coercion node) that will receive the corresponding unpacked value.
    if self.mult_factor:
        error(self.pos, "can't assign to multiplied sequence")
    self.unpacked_items = []
    self.coerced_unpacked_items = []
    self.any_coerced_items = False
    for i, arg in enumerate(self.args):
        arg = self.args[i] = arg.analyse_target_types(env)
        if arg.is_starred:
            # a starred target collects the rest into a Python list
            if not arg.type.assignable_from(list_type):
                error(arg.pos,
                      "starred target must have Python object (list) type")
            if arg.type is py_object_type:
                arg.type = list_type
        unpacked_item = PyTempNode(self.pos, env)
        coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
        if unpacked_item is not coerced_unpacked_item:
            self.any_coerced_items = True
        self.unpacked_items.append(unpacked_item)
        self.coerced_unpacked_items.append(coerced_unpacked_item)
    self.type = py_object_type
    return self
def generate_result_code(self, code):
    # sequence construction is shared via generate_operation_code()
    self.generate_operation_code(code)
def generate_sequence_packing_code(self, code, target=None, plain=False):
    # Emit C code that builds the tuple/list/ctuple value into 'target'
    # (defaults to this node's result), applying 'mult_factor' unless
    # 'plain' is set.
    if target is None:
        target = self.result()
    size_factor = c_mult = ''
    mult_factor = None

    if self.mult_factor and not plain:
        mult_factor = self.mult_factor
        if mult_factor.type.is_int:
            c_mult = mult_factor.result()
            if (isinstance(mult_factor.constant_result, _py_int_types) and
                    mult_factor.constant_result > 0):
                size_factor = ' * %s' % mult_factor.constant_result
            elif mult_factor.type.signed:
                # clamp negative multipliers to zero at runtime
                size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
            else:
                size_factor = ' * (%s)' % (c_mult,)

    if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult:
        # use PyTuple_Pack() to avoid generating huge amounts of one-time code
        code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
            target,
            len(self.args),
            ', '.join(arg.py_result() for arg in self.args),
            code.error_goto_if_null(target, self.pos)))
        code.put_gotref(target, py_object_type)
    elif self.type.is_ctuple:
        # plain C struct assignment, one field per element
        for i, arg in enumerate(self.args):
            code.putln("%s.f%s = %s;" % (
                target, i, arg.result()))
    else:
        # build the tuple/list step by step, potentially multiplying it as we go
        if self.type is list_type:
            create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
        elif self.type is tuple_type:
            create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
        else:
            raise InternalError("sequence packing for unexpected type %s" % self.type)
        arg_count = len(self.args)
        code.putln("%s = %s(%s%s); %s" % (
            target, create_func, arg_count, size_factor,
            code.error_goto_if_null(target, self.pos)))
        code.put_gotref(target, py_object_type)

        if c_mult:
            # FIXME: can't use a temp variable here as the code may
            # end up in the constant building function.  Temps
            # currently don't work there.
            #counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
            counter = Naming.quick_temp_cname
            code.putln('{ Py_ssize_t %s;' % counter)
            if arg_count == 1:
                offset = counter
            else:
                offset = '%s * %s' % (counter, arg_count)
            code.putln('for (%s=0; %s < %s; %s++) {' % (
                counter, counter, c_mult, counter
                ))
        else:
            offset = ''

        for i in range(arg_count):
            arg = self.args[i]
            if c_mult or not arg.result_is_new_reference():
                # SET_ITEM steals a reference, so make sure we own one
                code.put_incref(arg.result(), arg.ctype())
            arg.generate_giveref(code)
            code.putln("%s(%s, %s, %s);" % (
                set_item_func,
                target,
                (offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
                arg.py_result()))

        if c_mult:
            code.putln('}')
            #code.funcstate.release_temp(counter)
            code.putln('}')

    if mult_factor is not None and mult_factor.type.is_pyobject:
        # Python-object multiplier: apply it with PyNumber_InPlaceMultiply
        code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
            Naming.quick_temp_cname, target, mult_factor.py_result(),
            code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
            ))
        code.put_gotref(Naming.quick_temp_cname, py_object_type)
        code.put_decref(target, py_object_type)
        code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
        code.putln('}')
def generate_subexpr_disposal_code(self, code):
    # Dispose of the element subexpressions; which form is needed
    # depends on whether packing stole the element references.
    if self.mult_factor and self.mult_factor.type.is_int:
        super(SequenceNode, self).generate_subexpr_disposal_code(code)
    elif self.type is tuple_type and (self.is_literal or self.slow):
        super(SequenceNode, self).generate_subexpr_disposal_code(code)
    else:
        # We call generate_post_assignment_code here instead
        # of generate_disposal_code, because values were stored
        # in the tuple using a reference-stealing operation.
        for arg in self.args:
            arg.generate_post_assignment_code(code)
        # Should NOT call free_temps -- this is invoked by the default
        # generate_evaluation_code which will do that.
        if self.mult_factor:
            self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
                             exception_check=None, exception_value=None):
    """Generate C code assigning 'rhs' to this sequence of targets
    (tuple/list unpacking), then release the unpacking temps.

    Dispatches to the starred variant when one target is a '*name' target.
    The extra keyword parameters are part of the generic assignment
    interface and are not used for sequence unpacking.
    """
    if self.starred_assignment:
        self.generate_starred_assignment_code(rhs, code)
    else:
        self.generate_parallel_assignment_code(rhs, code)

    for item in self.unpacked_items:
        item.release(code)
    rhs.free_temps(code)
# C type of an iterator's tp_iternext slot function: takes the iterator
# object and returns the next item (or NULL).  Used to cache the slot
# pointer in generated unpacking code.
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
    ]))
def generate_parallel_assignment_code(self, rhs, code):
    """Generate C code unpacking 'rhs' into the fixed number of targets.

    Chooses between a fast tuple/list unpacking path and the generic
    iterator protocol, based on the statically known RHS type, then
    coerces each unpacked value and assigns it to its target.
    """
    # Need to work around the fact that generate_evaluation_code
    # allocates the temps in a rather hacky way -- the assignment
    # is evaluated twice, within each if-block.
    for item in self.unpacked_items:
        item.allocate(code)
    # The special path applies when the RHS may (also) be a tuple/list
    # at runtime; exactly-typed other builtins go straight to the
    # generic iterator protocol.
    special_unpack = (rhs.type is py_object_type
                      or rhs.type in (tuple_type, list_type)
                      or not rhs.type.is_builtin_type)
    # below this threshold, unrolled item-by-item unpacking is emitted
    long_enough_for_a_loop = len(self.unpacked_items) > 3

    if special_unpack:
        self.generate_special_parallel_unpacking_code(
            code, rhs, use_loop=long_enough_for_a_loop)
    else:
        code.putln("{")
        self.generate_generic_parallel_unpacking_code(
            code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
        code.putln("}")
    for value_node in self.coerced_unpacked_items:
        value_node.generate_evaluation_code(code)
    for i in range(len(self.args)):
        self.args[i].generate_assignment_code(
            self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
    """Emit fast unpacking C code for an RHS that may be a tuple or list.

    Emits a runtime type test (unless the RHS type is statically exact),
    a size check, and direct item access via Py{Tuple,List}_GET_ITEM on
    CPython (PySequence_ITEM elsewhere).  Falls back to the generic
    iterator protocol, or a direct None error, in the 'else' branch of
    the type test.
    """
    sequence_type_test = '1'  # '1' => type is statically known, no test needed
    none_check = "likely(%s != Py_None)" % rhs.py_result()
    if rhs.type is list_type:
        sequence_types = ['List']
        if rhs.may_be_none():
            sequence_type_test = none_check
    elif rhs.type is tuple_type:
        sequence_types = ['Tuple']
        if rhs.may_be_none():
            sequence_type_test = none_check
    else:
        # unknown object => test for both, tuple first (the common case)
        sequence_types = ['Tuple', 'List']
        tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
        list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
        sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)

    code.putln("if (%s) {" % sequence_type_test)
    code.putln("PyObject* sequence = %s;" % rhs.py_result())

    # list/tuple => check size
    code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
    code.putln("if (unlikely(size != %d)) {" % len(self.args))
    code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
    code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
        len(self.args), len(self.args)))
    code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
    code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
    # < 0 => exception
    code.putln(code.error_goto(self.pos))
    code.putln("}")

    code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
    # unpack items from list/tuple in unrolled loop (can't fail)
    if len(sequence_types) == 2:
        code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
    for i, item in enumerate(self.unpacked_items):
        code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
            item.result(), sequence_types[0], i))
    if len(sequence_types) == 2:
        code.putln("} else {")
        for i, item in enumerate(self.unpacked_items):
            code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
                item.result(), sequence_types[1], i))
        code.putln("}")
    # GET_ITEM returned borrowed references => incref all items afterwards
    for item in self.unpacked_items:
        code.put_incref(item.result(), item.ctype())
    code.putln("#else")
    # in non-CPython, use the PySequence protocol (which can fail)
    if not use_loop:
        for i, item in enumerate(self.unpacked_items):
            code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
                item.result(), i,
                code.error_goto_if_null(item.result(), self.pos)))
            code.put_gotref(item.result(), item.type)
    else:
        code.putln("{")
        code.putln("Py_ssize_t i;")
        # array of pointers to the target temps, filled by index in a loop
        code.putln("PyObject** temps[%s] = {%s};" % (
            len(self.unpacked_items),
            ','.join(['&%s' % item.result() for item in self.unpacked_items])))
        code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
        code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
            code.error_goto_if_null('item', self.pos)))
        code.put_gotref('item', py_object_type)
        code.putln("*(temps[i]) = item;")
        code.putln("}")
        code.putln("}")
    code.putln("#endif")

    rhs.generate_disposal_code(code)

    if sequence_type_test == '1':
        code.putln("}")  # all done
    elif sequence_type_test == none_check:
        # either tuple/list or None => save some code by generating the error directly
        code.putln("} else {")
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
        code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
        code.putln("}")  # all done
    else:
        code.putln("} else {")  # needs iteration fallback code
        self.generate_generic_parallel_unpacking_code(
            code, rhs, self.unpacked_items, use_loop=use_loop)
        code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
    """Emit C code unpacking 'rhs' into 'unpacked_items' via the generic
    iterator protocol (PyObject_GetIter + tp_iternext).

    code           -- C code writer
    rhs            -- evaluated RHS expression node
    unpacked_items -- the target temps to fill; may be a subset of
                      self.unpacked_items (starred assignment)
    use_loop       -- emit an index loop over a temps[] pointer array
                      instead of unrolled per-item unpacking
    terminate      -- if True, verify the iterator is exhausted and
                      release it; if False, the caller keeps consuming it

    Returns the iterator temp when terminate=False (caller must decref
    and release it), otherwise None.
    """
    code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
    code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
    code.putln("Py_ssize_t index = -1;")  # must be at the start of a C block!

    if use_loop:
        # Size the temps array after the items actually being unpacked
        # here, not after self.unpacked_items: callers (e.g. starred
        # assignment) may pass only a subset of the targets, and the
        # loop bound below uses len(unpacked_items) as well.
        code.putln("PyObject** temps[%s] = {%s};" % (
            len(unpacked_items),
            ','.join(['&%s' % item.result() for item in unpacked_items])))

    iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
    code.putln(
        "%s = PyObject_GetIter(%s); %s" % (
            iterator_temp,
            rhs.py_result(),
            code.error_goto_if_null(iterator_temp, self.pos)))
    code.put_gotref(iterator_temp, py_object_type)
    rhs.generate_disposal_code(code)

    # cache the iterator's tp_iternext slot to avoid repeated lookups
    iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
    code.putln("%s = __Pyx_PyObject_GetIterNextFunc(%s);" % (
        iternext_func, iterator_temp))

    unpacking_error_label = code.new_label('unpacking_failed')
    unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
    if use_loop:
        code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
        code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
        code.put_goto(unpacking_error_label)
        code.put_gotref("item", py_object_type)
        code.putln("*(temps[index]) = item;")
        code.putln("}")
    else:
        for i, item in enumerate(unpacked_items):
            code.put(
                "index = %d; %s = %s; if (unlikely(!%s)) " % (
                    i,
                    item.result(),
                    unpack_code,
                    item.result()))
            code.put_goto(unpacking_error_label)
            item.generate_gotref(code)

    if terminate:
        # ensure the iterator yielded exactly len(unpacked_items) values
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
        code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
            unpack_code,
            len(unpacked_items)))
        code.putln("%s = NULL;" % iternext_func)
        code.put_decref_clear(iterator_temp, py_object_type)

    unpacking_done_label = code.new_label('unpacking_done')
    code.put_goto(unpacking_done_label)

    # error path: distinguish "iterator exhausted too early" (raise a
    # dedicated error) from a propagating exception from tp_iternext
    code.put_label(unpacking_error_label)
    code.put_decref_clear(iterator_temp, py_object_type)
    code.putln("%s = NULL;" % iternext_func)
    code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
    code.putln(code.error_goto(self.pos))
    code.put_label(unpacking_done_label)

    code.funcstate.release_temp(iternext_func)
    if terminate:
        code.funcstate.release_temp(iterator_temp)
        iterator_temp = None
    return iterator_temp
def generate_starred_assignment_code(self, rhs, code):
    """Emit C code for an assignment with one starred target, e.g.
    "a, *b, c = rhs".

    Strategy: unpack the fixed items left of the star via the generic
    iterator protocol (without terminating the iterator), collect the
    remainder into a new list for the starred target, then pop the
    fixed right-hand items off the end of that list.
    """
    for i, arg in enumerate(self.args):
        if arg.is_starred:
            starred_target = self.unpacked_items[i]
            unpacked_fixed_items_left = self.unpacked_items[:i]
            unpacked_fixed_items_right = self.unpacked_items[i+1:]
            break
    else:
        assert False  # parser guarantees exactly one starred target here

    iterator_temp = None
    if unpacked_fixed_items_left:
        for item in unpacked_fixed_items_left:
            item.allocate(code)
        code.putln('{')
        # keep the iterator alive (terminate=False) to feed the star target
        iterator_temp = self.generate_generic_parallel_unpacking_code(
            code, rhs, unpacked_fixed_items_left,
            use_loop=True, terminate=False)
        for i, item in enumerate(unpacked_fixed_items_left):
            value_node = self.coerced_unpacked_items[i]
            value_node.generate_evaluation_code(code)
        code.putln('}')

    starred_target.allocate(code)
    target_list = starred_target.result()
    # drain the rest of the iterator (or the whole rhs) into a list;
    # steal the rhs list directly when it is a fresh temp
    code.putln("%s = %s(%s); %s" % (
        target_list,
        "__Pyx_PySequence_ListKeepNew" if (
            not iterator_temp and rhs.is_temp and rhs.type in (py_object_type, list_type))
        else "PySequence_List",
        iterator_temp or rhs.py_result(),
        code.error_goto_if_null(target_list, self.pos)))
    starred_target.generate_gotref(code)

    if iterator_temp:
        code.put_decref_clear(iterator_temp, py_object_type)
        code.funcstate.release_temp(iterator_temp)
    else:
        rhs.generate_disposal_code(code)

    if unpacked_fixed_items_right:
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
        code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
        code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
        code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
            len(unpacked_fixed_items_left), length_temp,
            code.error_goto(self.pos)))
        code.putln('}')

        for item in unpacked_fixed_items_right[::-1]:
            item.allocate(code)
        # move the last items out of the list, back to front
        for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
                                                    self.coerced_unpacked_items[::-1])):
            code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
            code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
                item.py_result(), target_list, length_temp, i+1))
            # resize the list the hard way
            code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
            code.putln('#else')
            code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
                item.py_result(), target_list, length_temp, i+1))
            code.putln('#endif')
            item.generate_gotref(code)
            coerced_arg.generate_evaluation_code(code)

        # non-CPython cannot shrink the list in place => slice a new one
        code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
        sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
            sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
            code.error_goto_if_null(sublist_temp, self.pos)))
        code.put_gotref(sublist_temp, py_object_type)
        code.funcstate.release_temp(length_temp)
        code.put_decref(target_list, py_object_type)
        code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
        code.putln('#else')
        code.putln('(void)%s;' % sublist_temp)  # avoid warning about unused variable
        code.funcstate.release_temp(sublist_temp)
        code.putln('#endif')

    for i, arg in enumerate(self.args):
        arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
    """Annotate all child expressions for annotated-source output,
    including unpacking temps when this node is an assignment target.
    """
    for child in self.args:
        child.annotate(code)
    if not self.unpacked_items:
        return
    for child in self.unpacked_items:
        child.annotate(code)
    for child in self.coerced_unpacked_items:
        child.annotate(code)
class TupleNode(SequenceNode):
    """Tuple constructor.

    Builds a Python tuple, or a C tuple (ctuple) when all argument types
    are C types.  Constant tuples are deduplicated and cached as module
    constants; a constant tuple with a non-constant multiplier is cached
    partially and multiplied at runtime.
    """
    type = tuple_type
    is_partly_literal = False  # True => cache only the constant part, multiply at runtime

    gil_message = "Constructing Python tuple"

    def infer_type(self, env):
        if self.mult_factor or not self.args:
            return tuple_type
        arg_types = [arg.infer_type(env) for arg in self.args]
        if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
               for type in arg_types):
            return tuple_type
        # all argument types are plain C types => a ctuple can be used
        return env.declare_tuple_type(self.pos, arg_types).type

    def analyse_types(self, env, skip_children=False):
        if len(self.args) == 0:
            # the empty tuple is a shared module-level constant
            self.is_temp = False
            self.is_literal = True
            return self

        if not skip_children:
            for i, arg in enumerate(self.args):
                if arg.is_starred:
                    arg.starred_expr_allowed_here = True
                self.args[i] = arg.analyse_types(env)
        if (not self.mult_factor and
                not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
                        for arg in self.args)):
            # plain C values only => build a ctuple instead
            self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
            self.is_temp = 1
            return self

        node = SequenceNode.analyse_types(self, env, skip_children=True)
        node = node._create_merge_node_if_necessary(env)
        if not node.is_sequence_constructor:
            return node

        if not all(child.is_literal for child in node.args):
            return node
        if not node.mult_factor or (
                node.mult_factor.is_literal and
                isinstance(node.mult_factor.constant_result, _py_int_types)):
            # fully constant => becomes a cached module constant
            node.is_temp = False
            node.is_literal = True
        else:
            if not node.mult_factor.type.is_pyobject:
                node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
            node.is_temp = True
            node.is_partly_literal = True
        return node

    def analyse_as_type(self, env):
        # ctuple type, e.g. "(int, float)" used in a type position
        if not self.args:
            return None
        item_types = [arg.analyse_as_type(env) for arg in self.args]
        if any(t is None for t in item_types):
            return None
        entry = env.declare_tuple_type(self.pos, item_types)
        return entry.type

    def coerce_to(self, dst_type, env):
        if self.type.is_ctuple:
            if dst_type.is_ctuple and self.type.size == dst_type.size:
                return self.coerce_to_ctuple(dst_type, env)
            elif dst_type is tuple_type or dst_type is py_object_type:
                # rebuild as a Python tuple from Python object items
                coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
                return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True)
            else:
                return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
        elif dst_type.is_ctuple and not self.mult_factor:
            return self.coerce_to_ctuple(dst_type, env)
        else:
            return SequenceNode.coerce_to(self, dst_type, env)

    def as_list(self):
        t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, tuple):
            t.constant_result = list(self.constant_result)
        return t

    def is_simple(self):
        # either temp or constant => always simple
        return True

    def nonlocally_immutable(self):
        # either temp or constant => always safe
        return True

    def calculate_result_code(self):
        if len(self.args) > 0:
            return self.result_code
        else:
            return Naming.empty_tuple

    def calculate_constant_result(self):
        self.constant_result = tuple([
            arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = self.compile_time_value_list(denv)
        try:
            return tuple(values)
        except Exception as e:
            self.compile_time_value_error(e)

    def generate_operation_code(self, code):
        if len(self.args) == 0:
            # result_code is Naming.empty_tuple
            return

        if self.is_literal or self.is_partly_literal:
            # The "mult_factor" is part of the deduplication if it is also constant, i.e. when
            # we deduplicate the multiplied result.  Otherwise, only deduplicate the constant part.
            dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args)
            tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2, dedup_key=dedup_key)
            const_code = code.get_cached_constants_writer(tuple_target)
            if const_code is not None:
                # constant is not yet initialised
                const_code.mark_pos(self.pos)
                self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal)
                const_code.put_giveref(tuple_target, py_object_type)
            if self.is_literal:
                self.result_code = tuple_target
            else:
                # multiply the cached constant part at runtime
                code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
                    self.result(), tuple_target, self.mult_factor.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)
                ))
                self.generate_gotref(code)
        else:
            self.type.entry.used = True
            self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
    """List constructor.

    Besides building a Python list, a list literal can be coerced to a
    C array, a C struct, a C++ class or a ctuple, in which case the
    elements are assigned directly without creating a Python object.
    """
    # obj_conversion_errors    [PyrexError]   used internally
    # original_args            [ExprNode]     used internally

    obj_conversion_errors = []
    type = list_type
    in_module_scope = False

    gil_message = "Constructing Python list"

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer non-object list arrays.
        return list_type

    def analyse_expressions(self, env):
        for arg in self.args:
            if arg.is_starred:
                arg.starred_expr_allowed_here = True
        node = SequenceNode.analyse_expressions(self, env)
        return node.coerce_to_pyobject(env)

    def analyse_types(self, env):
        # Collect object-conversion errors silently; they are only
        # reported if the list really becomes a Python object later
        # (see coerce_to()).
        with local_errors(ignore=True) as errors:
            self.original_args = list(self.args)
            node = SequenceNode.analyse_types(self, env)
        node.obj_conversion_errors = errors
        if env.is_module_scope:
            self.in_module_scope = True
        node = node._create_merge_node_if_necessary(env)
        return node

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            # delayed conversion errors become real now
            for err in self.obj_conversion_errors:
                report_error(err)
            self.obj_conversion_errors = []
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
            array_length = len(self.args)
            if self.mult_factor:
                if isinstance(self.mult_factor.constant_result, _py_int_types):
                    if self.mult_factor.constant_result <= 0:
                        error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
                    else:
                        array_length *= self.mult_factor.constant_result
                else:
                    error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
            base_type = dst_type.base_type
            self.type = PyrexTypes.CArrayType(base_type, array_length)
            for i in range(len(self.original_args)):
                arg = self.args[i]
                if isinstance(arg, CoerceToPyTypeNode):
                    # undo the object conversion, coerce the raw C value instead
                    arg = arg.arg
                self.args[i] = arg.coerce_to(base_type, env)
        elif dst_type.is_cpp_class:
            # TODO(robertwb): Avoid object conversion for vector/list/set.
            return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
        elif self.mult_factor:
            error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
        elif dst_type.is_struct:
            if len(self.args) > len(dst_type.scope.var_entries):
                error(self.pos, "Too many members for '%s'" % dst_type)
            else:
                if len(self.args) < len(dst_type.scope.var_entries):
                    warning(self.pos, "Too few members for '%s'" % dst_type, 1)
                for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
                    if isinstance(arg, CoerceToPyTypeNode):
                        arg = arg.arg
                    self.args[i] = arg.coerce_to(member.type, env)
            self.type = dst_type
        elif dst_type.is_ctuple:
            return self.coerce_to_ctuple(dst_type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        return self

    def as_list(self):  # dummy for compatibility with TupleNode
        return self

    def as_tuple(self):
        t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, list):
            t.constant_result = tuple(self.constant_result)
        return t

    def allocate_temp_result(self, code):
        if self.type.is_array:
            if self.in_module_scope:
                self.temp_code = code.funcstate.allocate_temp(
                    self.type, manage_ref=False, static=True, reusable=False)
            else:
                # To be valid C++, we must allocate the memory on the stack
                # manually and be sure not to reuse it for something else.
                # Yes, this means that we leak a temp array variable.
                self.temp_code = code.funcstate.allocate_temp(
                    self.type, manage_ref=False, reusable=False)
        else:
            SequenceNode.allocate_temp_result(self, code)

    def calculate_constant_result(self):
        if self.mult_factor:
            raise ValueError()  # may exceed the compile time memory
        self.constant_result = [
            arg.constant_result for arg in self.args]

    def compile_time_value(self, denv):
        l = self.compile_time_value_list(denv)
        if self.mult_factor:
            l *= self.mult_factor.compile_time_value(denv)
        return l

    def generate_operation_code(self, code):
        if self.type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.generate_sequence_packing_code(code)
        elif self.type.is_array:
            if self.mult_factor:
                # emit a C loop copying the element block mult_factor times
                code.putln("{")
                code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
                code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
                    i=Naming.quick_temp_cname, count=self.mult_factor.result()))
                offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
            else:
                offset = ''
            for i, arg in enumerate(self.args):
                if arg.type.is_array:
                    # nested arrays cannot be assigned => memcpy element-wise
                    code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
                    code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
                        self.result(), i, offset,
                        arg.result(), self.result()
                    ))
                else:
                    code.putln("%s[%s%s] = %s;" % (
                        self.result(),
                        i,
                        offset,
                        arg.result()))
            if self.mult_factor:
                code.putln("}")
                code.putln("}")
        elif self.type.is_struct:
            for arg, member in zip(self.args, self.type.scope.var_entries):
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    member.cname,
                    arg.result()))
        else:
            raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
    """Abstract base class for ExprNodes that have their own local
    scope, such as generator expressions.

    expr_scope -- Scope -- the inner scope of the expression
    """
    subexprs = []
    expr_scope = None

    # does this node really have a local scope, e.g. does it leak loop
    # variables or not?  non-leaking Py3 behaviour is default, except
    # for list comprehensions where the behaviour differs in Py2 and
    # Py3 (set in Parsing.py based on parser context)
    has_local_scope = True

    def init_scope(self, outer_scope, expr_scope=None):
        if expr_scope is not None:
            self.expr_scope = expr_scope
        elif self.has_local_scope:
            self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
        else:
            self.expr_scope = None

    def analyse_declarations(self, env):
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        # this is called with the expr_scope as env
        pass

    def analyse_types(self, env):
        # no recursion here, the children will be analysed separately below
        return self

    def analyse_scoped_expressions(self, env):
        # this is called with the expr_scope as env
        return self

    def generate_evaluation_code(self, code):
        """Evaluate inside a C block that declares the scope's local
        variables, making sure their Python references are freed on
        every exit path (normal, error, break/continue).
        """
        # set up local variables and free their references on exit
        generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
        if not self.has_local_scope or not self.expr_scope.var_entries:
            # no local variables => delegate, done
            generate_inner_evaluation_code(code)
            return

        code.putln('{ /* enter inner scope */')
        py_entries = []
        # sorted for deterministic C output; entries with empty names are skipped
        for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
            if not entry.in_closure:
                if entry.type.is_pyobject and entry.used:
                    py_entries.append(entry)
        if not py_entries:
            # no local Python references => no cleanup required
            generate_inner_evaluation_code(code)
            code.putln('} /* exit inner scope */')
            return

        # must free all local Python references at each exit point
        old_loop_labels = code.new_loop_labels()
        old_error_label = code.new_error_label()

        generate_inner_evaluation_code(code)

        # normal (non-error) exit
        self._generate_vars_cleanup(code, py_entries)

        # error/loop body exit points: clean up, then jump to the outer label
        exit_scope = code.new_label('exit_scope')
        code.put_goto(exit_scope)
        for label, old_label in ([(code.error_label, old_error_label)] +
                                 list(zip(code.get_loop_labels(), old_loop_labels))):
            if code.label_used(label):
                code.put_label(label)
                self._generate_vars_cleanup(code, py_entries)
                code.put_goto(old_label)
        code.put_label(exit_scope)
        code.putln('} /* exit inner scope */')

        code.set_loop_labels(old_loop_labels)
        code.error_label = old_error_label

    def _generate_vars_cleanup(self, code, py_entries):
        # release the Python references held by the scope's variables
        for entry in py_entries:
            if entry.is_cglobal:
                code.put_var_gotref(entry)
                code.put_var_decref_set(entry, "Py_None")
            else:
                code.put_var_xdecref_clear(entry)
class ComprehensionNode(ScopedExprNode):
    """A list/set/dict comprehension.

    loop   -- the for-loop node that fills the collection
    append -- the ComprehensionAppendNode executed in the loop body
    type   -- list_type, set_type or dict_type
    """
    # The loop is the only traversed child; 'append' is reached through it.
    child_attrs = ["loop"]

    is_temp = True
    constant_result = not_a_constant

    def infer_type(self, env):
        return self.type

    def analyse_declarations(self, env):
        self.append.target = self  # this is used in the PyList_Append of the inner loop
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def analyse_scoped_expressions(self, env):
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_operation_code(self, code):
        # create the empty target collection, then run the loop to fill it
        if self.type is Builtin.list_type:
            create_code = 'PyList_New(0)'
        elif self.type is Builtin.set_type:
            create_code = 'PySet_New(NULL)'
        elif self.type is Builtin.dict_type:
            create_code = 'PyDict_New()'
        else:
            raise InternalError("illegal type for comprehension: %s" % self.type)
        code.putln('%s = %s; %s' % (
            self.result(), create_code,
            code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
        self.loop.generate_execution_code(code)

    def annotate(self, code):
        self.loop.annotate(code)
class ComprehensionAppendNode(Node):
    """The append statement in a list/set comprehension body.

    expr   -- the value expression to append
    target -- the ComprehensionNode being filled (set externally)
    """
    # Need to be careful to avoid infinite recursion:
    # target must not be in child_attrs/subexprs
    child_attrs = ['expr']
    target = None

    type = PyrexTypes.c_int_type  # C return type of the append call

    def analyse_expressions(self, env):
        self.expr = self.expr.analyse_expressions(env)
        if not self.expr.type.is_pyobject:
            self.expr = self.expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        if self.target.type is list_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
            function = "__Pyx_ListComp_Append"
        elif self.target.type is set_type:
            function = "PySet_Add"
        else:
            raise InternalError(
                "Invalid type for comprehension node: %s" % self.target.type)

        self.expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
            function,
            self.target.result(),
            self.expr.result()
        ), self.pos))
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
    """The item-setting statement in a dict comprehension body.

    key_expr   -- the key expression
    value_expr -- the value expression
    """
    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        self.key_expr = self.key_expr.analyse_expressions(env)
        if not self.key_expr.type.is_pyobject:
            self.key_expr = self.key_expr.coerce_to_pyobject(env)
        self.value_expr = self.value_expr.analyse_expressions(env)
        if not self.value_expr.type.is_pyobject:
            self.value_expr = self.value_expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result()
        ), self.pos))
        self.key_expr.generate_disposal_code(code)
        self.key_expr.free_temps(code)
        self.value_expr.generate_disposal_code(code)
        self.value_expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.key_expr.generate_function_definitions(env, code)
        self.value_expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.key_expr.annotate(code)
        self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ExprNode):
    """An inlined generator expression for which the result is calculated
    inside of the loop and returned as a single, first and only Generator
    return value.

    This will only be created by transforms when replacing safe builtin
    calls on generator expressions.

    gen       -- GeneratorExpressionNode -- the generator, not containing any YieldExprNodes
    orig_func -- String                  -- the name of the builtin function this node replaces
    target    -- ExprNode or None        -- a 'target' for a ComprehensionAppend node
    """
    subexprs = ["gen"]
    orig_func = None
    target = None
    is_temp = True
    type = py_object_type

    def __init__(self, pos, gen, comprehension_type=None, **kwargs):
        gbody = gen.def_node.gbody
        gbody.is_inlined = True
        if comprehension_type is not None:
            assert comprehension_type in (list_type, set_type, dict_type), comprehension_type
            gbody.inlined_comprehension_type = comprehension_type
            kwargs.update(
                # the generator's return value slot serves as the append target
                target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname),
                type=comprehension_type,
            )
        super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs)

    def may_be_none(self):
        # these builtins never return None for a valid iterable
        return self.orig_func not in ('any', 'all', 'sorted')

    def infer_type(self, env):
        return self.type

    def analyse_types(self, env):
        self.gen = self.gen.analyse_expressions(env)
        return self

    def generate_result_code(self, code):
        # run the inlined generator to completion; its single "next" value
        # is the aggregated result
        code.putln("%s = __Pyx_Generator_Next(%s); %s" % (
            self.result(), self.gen.result(),
            code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class MergedSequenceNode(ExprNode):
"""
Merge a sequence of iterables into a set/list/tuple.
The target collection is determined by self.type, which must be set externally.
args [ExprNode]
"""
subexprs = ['args']
is_temp = True
gil_message = "Constructing Python collection"
def __init__(self, pos, args, type):
if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
# construct a list directly from the first argument that we can then extend
if args[0].type is not list_type:
args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True)
ExprNode.__init__(self, pos, args=args, type=type)
def calculate_constant_result(self):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.constant_result <= 0:
continue
# otherwise, adding each item once should be enough
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.constant_result for arg in item.args)
else:
items = item.constant_result
result.extend(items)
if self.type is set_type:
result = set(result)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
self.constant_result = result
def compile_time_value(self, denv):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.compile_time_value(denv) <= 0:
continue
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.compile_time_value(denv) for arg in item.args)
else:
items = item.compile_time_value(denv)
result.extend(items)
if self.type is set_type:
try:
result = set(result)
except Exception as e:
self.compile_time_value_error(e)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return self.type
def analyse_types(self, env):
args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after * must be an iterable, not NoneType')
for arg in self.args
]
if len(args) == 1 and args[0].type is self.type:
# strip this intermediate node and use the bare collection
return args[0]
assert self.type in (set_type, list_type, tuple_type)
self.args = args
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_set = self.type is set_type
args = iter(self.args)
item = next(args)
item.generate_evaluation_code(code)
if (is_set and item.is_set_literal or
not is_set and item.is_sequence_constructor and item.type is list_type):
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = %s(%s); %s" % (
self.result(),
'PySet_New' if is_set
else "__Pyx_PySequence_ListKeepNew" if item.is_temp and item.type in (py_object_type, list_type)
else "PySequence_List",
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
item.generate_disposal_code(code)
item.free_temps(code)
helpers = set()
if is_set:
add_func = "PySet_Add"
extend_func = "__Pyx_PySet_Update"
else:
add_func = "__Pyx_ListComp_Append"
extend_func = "__Pyx_PyList_Extend"
for item in args:
if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
(item.is_sequence_constructor and not item.mult_factor)):
if not is_set and item.args:
helpers.add(("ListCompAppend", "Optimize.c"))
for arg in item.args:
arg.generate_evaluation_code(code)
code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
add_func,
self.result(),
arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
continue
if is_set:
helpers.add(("PySet_Update", "Builtins.c"))
else:
helpers.add(("ListExtend", "Optimize.c"))
item.generate_evaluation_code(code)
code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
extend_func,
self.result(),
item.py_result()))
item.generate_disposal_code(code)
item.free_temps(code)
if self.type is tuple_type:
code.putln("{")
code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
Naming.quick_temp_cname,
self.result()))
code.put_decref(self.result(), py_object_type)
code.putln("%s = %s; %s" % (
self.result(),
Naming.quick_temp_cname,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
code.putln("}")
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))
def annotate(self, code):
    """Recurse into each element so annotations cover the whole literal."""
    for element in self.args:
        element.annotate(code)
class SetNode(ExprNode):
    """
    Set constructor, e.g. ``{a, b, c}``.

    args  [ExprNode]  the element expressions
    """
    subexprs = ['args']
    type = set_type
    is_set_literal = True
    gil_message = "Constructing Python set"

    def analyse_types(self, env):
        # Elements of a Python set must all be Python objects.
        # (enumerate() instead of range(len(...)) — same in-place update.)
        for i, arg in enumerate(self.args):
            arg = arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        self.type = set_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        # A set literal always yields a set object, never None.
        return False

    def calculate_constant_result(self):
        # Set comprehension instead of set([list comprehension]):
        # avoids building an intermediate list.
        self.constant_result = {arg.constant_result for arg in self.args}

    def compile_time_value(self, denv):
        values = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return set(values)
        except Exception as e:
            # Unhashable elements etc. are reported as compile time errors.
            self.compile_time_value_error(e)

    def generate_evaluation_code(self, code):
        # Evaluate all elements first, then add them one by one.
        for arg in self.args:
            arg.generate_evaluation_code(code)
        self.allocate_temp_result(code)
        code.putln(
            "%s = PySet_New(0); %s" % (
                self.result(),
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
        for arg in self.args:
            code.put_error_if_neg(
                self.pos,
                "PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
            arg.generate_disposal_code(code)
            arg.free_temps(code)
class DictNode(ExprNode):
    # Dictionary constructor.  Can also be coerced into a C struct/union
    # initialiser, in which case the keys must be field-name literals.
    #
    # key_value_pairs       [DictItemNode]
    # exclude_null_values   [boolean]       Do not add NULL values to dict
    #
    # obj_conversion_errors [PyrexError]    used internally

    subexprs = ['key_value_pairs']
    is_temp = 1
    exclude_null_values = False
    type = dict_type
    is_dict_literal = True
    reject_duplicates = False

    obj_conversion_errors = []

    @classmethod
    def from_pairs(cls, pos, pairs):
        # Convenience constructor from (key, value) expression pairs.
        return cls(pos, key_value_pairs=[
            DictItemNode(pos, key=k, value=v) for k, v in pairs])

    def calculate_constant_result(self):
        self.constant_result = dict([
            item.constant_result for item in self.key_value_pairs])

    def compile_time_value(self, denv):
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
                 for item in self.key_value_pairs]
        try:
            return dict(pairs)
        except Exception as e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer struct constructors.
        return dict_type

    def analyse_types(self, env):
        # Errors from analysing the items are collected, not reported:
        # if this literal is later coerced to a struct/union, object
        # conversion errors do not apply.  See coerce_to()/release_errors().
        with local_errors(ignore=True) as errors:
            self.key_value_pairs = [
                item.analyse_types(env)
                for item in self.key_value_pairs
            ]
        self.obj_conversion_errors = errors
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            # Staying in Python object land: replay the errors that were
            # suppressed during analyse_types().
            self.release_errors()
            if self.type.is_struct_or_union:
                # Struct value being converted back to a Python dict.
                if not dict_type.subtype_of(dst_type):
                    error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type)
                return DictNode(self.pos, key_value_pairs=[
                    DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env),
                                 value=item.value.coerce_to_pyobject(env))
                    for item in self.key_value_pairs])
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        elif dst_type.is_struct_or_union:
            # Reinterpret the dict literal as a struct/union initialiser.
            self.type = dst_type
            if not dst_type.is_struct and len(self.key_value_pairs) != 1:
                error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
            elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
                warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
            for item in self.key_value_pairs:
                # Unwrap Python-object coercions added during analysis;
                # struct fields are accessed by their C name.
                if isinstance(item.key, CoerceToPyTypeNode):
                    item.key = item.key.arg
                if not item.key.is_string_literal:
                    error(item.key.pos, "Invalid struct field identifier")
                    item.key = StringNode(item.key.pos, value="")
                else:
                    key = str(item.key.value)  # converts string literals to unicode in Py3
                    member = dst_type.scope.lookup_here(key)
                    if not member:
                        error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
                    else:
                        value = item.value
                        if isinstance(value, CoerceToPyTypeNode):
                            value = value.arg
                        item.value = value.coerce_to(member.type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        return self

    def release_errors(self):
        # Report the errors collected during analyse_types(), then clear them.
        for err in self.obj_conversion_errors:
            report_error(err)
        self.obj_conversion_errors = []

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        #  Custom method used here because key-value
        #  pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        is_dict = self.type.is_pyobject
        if is_dict:
            self.release_errors()
            code.putln(
                "%s = __Pyx_PyDict_NewPresized(%d); %s" % (
                    self.result(),
                    len(self.key_value_pairs),
                    code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)

        # 'keys_seen' tracks literal keys for compile-time duplicate
        # detection; it is set to None as soon as a runtime check is needed.
        keys_seen = set()
        key_type = None
        needs_error_helper = False
        for item in self.key_value_pairs:
            item.generate_evaluation_code(code)
            if is_dict:
                if self.exclude_null_values:
                    code.putln('if (%s) {' % item.value.py_result())
                key = item.key
                if self.reject_duplicates:
                    if keys_seen is not None:
                        # avoid runtime 'in' checks for literals that we can do at compile time
                        if not key.is_string_literal:
                            keys_seen = None
                        elif key.value in keys_seen:
                            # FIXME: this could be a compile time error, at least in Cython code
                            keys_seen = None
                        elif key_type is not type(key.value):
                            if key_type is None:
                                key_type = type(key.value)
                                keys_seen.add(key.value)
                            else:
                                # different types => may not be able to compare at compile time
                                keys_seen = None
                        else:
                            keys_seen.add(key.value)

                    if keys_seen is None:
                        code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % (
                            self.result(), key.py_result()))
                        # currently only used in function calls
                        needs_error_helper = True
                        code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                            key.py_result(),
                            code.error_goto(item.pos)))
                        code.putln("} else {")

                code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % (
                    self.result(),
                    item.key.py_result(),
                    item.value.py_result()))
                if self.reject_duplicates and keys_seen is None:
                    code.putln('}')
                if self.exclude_null_values:
                    code.putln('}')
            else:
                # C struct/union initialisation: plain field assignment.
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    item.key.value,
                    item.value.result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

        if needs_error_helper:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))

    def annotate(self, code):
        for item in self.key_value_pairs:
            item.annotate(code)
class DictItemNode(ExprNode):
    """A single key/value pair inside a DictNode.

    key    ExprNode
    value  ExprNode
    """
    subexprs = ['key', 'value']
    nogil_check = None  # Parent DictNode takes care of it

    def calculate_constant_result(self):
        self.constant_result = (
            self.key.constant_result, self.value.constant_result)

    def analyse_types(self, env):
        # Analyse both children first, then coerce both - same order as
        # evaluation, so error positions stay stable.
        key, value = self.key.analyse_types(env), self.value.analyse_types(env)
        self.key = key.coerce_to_pyobject(env)
        self.value = value.coerce_to_pyobject(env)
        return self

    def generate_evaluation_code(self, code):
        for child in (self.key, self.value):
            child.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        for child in (self.key, self.value):
            child.generate_disposal_code(code)

    def free_temps(self, code):
        for child in (self.key, self.value):
            child.free_temps(code)

    def __iter__(self):
        yield self.key
        yield self.value
class SortedDictKeysNode(ExprNode):
    # build sorted list of dict keys, e.g. for dir()
    subexprs = ['arg']
    is_temp = True

    def __init__(self, arg):
        ExprNode.__init__(self, arg.pos, arg=arg)
        self.type = Builtin.list_type

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env)
        if arg.type is Builtin.dict_type:
            # PyDict_Keys() must not be called on None; insert a check.
            arg = arg.as_none_safe_node(
                "'NoneType' object is not iterable")
        self.arg = arg
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        dict_result = self.arg.py_result()
        if self.arg.type is Builtin.dict_type:
            # Known dict: fast C-API path.
            code.putln('%s = PyDict_Keys(%s); %s' % (
                self.result(), dict_result,
                code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)
        else:
            # originally used PyMapping_Keys() here, but that may return a tuple
            code.globalstate.use_utility_code(UtilityCode.load_cached(
                'PyObjectCallMethod0', 'ObjectHandling.c'))
            keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
            code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
                self.result(), dict_result, keys_cname,
                code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)
            # An arbitrary keys() method may return any iterable;
            # normalise to a list so PyList_Sort() below is valid.
            code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
            self.generate_decref_set(code, "PySequence_List(%s)" % self.result())
            code.putln(code.error_goto_if_null(self.result(), self.pos))
            self.generate_gotref(code)
            code.putln("}")
        code.put_error_if_neg(
            self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
    """Mixin that interns the defining module name and the qualified
    name of a class/function node as Python string constants."""

    def get_py_mod_name(self, code):
        """Return the C name of the interned module-name string constant."""
        return code.get_py_string_const(self.module_name, identifier=True)

    def get_py_qualified_name(self, code):
        """Return the C name of the interned qualified-name string constant."""
        return code.get_py_string_const(self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a class object given
    # a name, tuple of bases and class dictionary.
    #
    # name            EncodedString    Name of the class
    # class_def_node  PyClassDefNode   PyClassDefNode defining this class
    # doc             ExprNode or None Doc string
    # module_name     EncodedString    Name of defining module

    subexprs = ['doc']
    type = py_object_type
    is_temp = True

    def infer_type(self, env):
        # TODO: could return 'type' in some cases
        return py_object_type

    def analyse_types(self, env):
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
        return self

    def may_be_none(self):
        return True

    gil_message = "Constructing Python class"

    def generate_result_code(self, code):
        class_def_node = self.class_def_node
        cname = code.intern_identifier(self.name)

        if self.doc:
            # Store the docstring in the class dict before creating the class.
            code.put_error_if_neg(self.pos,
                'PyDict_SetItem(%s, %s, %s)' % (
                    class_def_node.dict.py_result(),
                    code.intern_identifier(
                        StringEncoding.EncodedString("__doc__")),
                    self.doc.py_result()))
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        code.putln(
            '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
                self.result(),
                class_def_node.bases.py_result(),
                class_def_node.dict.py_result(),
                cname,
                qualname,
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class Py3ClassNode(ExprNode):
    # Helper class used in the implementation of Python3+
    # class definitions. Constructs a class object given
    # a name, tuple of bases and class dictionary.
    #
    # name                 EncodedString   Name of the class
    # module_name          EncodedString   Name of defining module
    # class_def_node       PyClassDefNode  PyClassDefNode defining this class
    # calculate_metaclass  bool            should call CalculateMetaclass()
    # allow_py2_metaclass  bool            should look for Py2 metaclass
    # force_type           bool            always create a "new style" class, even with no bases

    subexprs = []
    type = py_object_type
    force_type = False
    is_temp = True

    def infer_type(self, env):
        # TODO: could return 'type' in some cases
        return py_object_type

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        return True

    gil_message = "Constructing Python class"

    def generate_result_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
        cname = code.intern_identifier(self.name)
        class_def_node = self.class_def_node
        mkw = class_def_node.mkw.py_result() if class_def_node.mkw else 'NULL'
        # Pick the metaclass: explicit > forced 'type' > default class type.
        if class_def_node.metaclass:
            metaclass = class_def_node.metaclass.py_result()
        elif self.force_type:
            metaclass = "((PyObject*)&PyType_Type)"
        else:
            metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
        code.putln(
            '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
                self.result(),
                metaclass,
                cname,
                class_def_node.bases.py_result(),
                class_def_node.dict.py_result(),
                mkw,
                self.calculate_metaclass,
                self.allow_py2_metaclass,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class PyClassMetaclassNode(ExprNode):
    """Holds the Python3 metaclass object of a class being defined.

    class_def_node  PyClassDefNode  PyClassDefNode defining this class
    """
    subexprs = []

    def analyse_types(self, env):
        self.type = py_object_type
        self.is_temp = True
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        class_def_node = self.class_def_node
        bases_result = class_def_node.bases.result()
        if class_def_node.mkw:
            # Class keyword arguments may carry an explicit 'metaclass='.
            utility_name = "Py3MetaclassGet"
            call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
                bases_result, class_def_node.mkw.result())
        else:
            # No keywords: derive the metaclass from the bases alone.
            utility_name = "CalculateMetaclass"
            call = "__Pyx_CalculateMetaclass(NULL, %s)" % (bases_result,)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached(utility_name, "ObjectHandling.c"))
        code.putln(
            "%s = %s; %s" % (
                self.result(), call,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class holds Python3 namespace object
    #
    # All this are not owned by this node
    #  class_def_node  PyClassDefNode    PyClassDefNode defining this class
    #  doc             ExprNode or None  Doc string (owned)

    subexprs = ['doc']

    def analyse_types(self, env):
        if self.doc:
            self.doc = self.doc.analyse_types(env).coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        class_def_node = self.class_def_node
        # Optional arguments are passed as a typed NULL pointer.
        null = "(PyObject *) NULL"
        doc_code = self.doc.result() if self.doc else null
        mkw = class_def_node.mkw.py_result() if class_def_node.mkw else null
        metaclass = class_def_node.metaclass.py_result() if class_def_node.metaclass else null
        code.putln(
            "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
                self.result(),
                metaclass,
                class_def_node.bases.result(),
                cname,
                qualname,
                mkw,
                py_mod_name,
                doc_code,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class ClassCellInjectorNode(ExprNode):
    # Initialize CyFunction.func_classobj
    is_temp = True
    type = py_object_type
    subexprs = []
    is_active = False

    def analyse_expressions(self, env):
        return self

    def generate_result_code(self, code):
        assert self.is_active
        # Collect the class body's functions in a list; see
        # generate_injection_code() for how the class object reaches them.
        code.putln(
            '%s = PyList_New(0); %s' % (
                self.result(),
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)

    def generate_injection_code(self, code, classobj_cname):
        assert self.is_active
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
        # Inject the freshly created class object into the collected
        # functions' __class__ cells (for no-argument super()).
        code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % (
            self.result(), classobj_cname))
class ClassCellNode(ExprNode):
    # Class Cell for noargs super()
    subexprs = []
    is_temp = True
    is_generator = False
    type = py_object_type

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        # The class object is stored on the CyFunction for plain functions
        # and on the generator object for generators.
        if not self.is_generator:
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                self.result(),
                Naming.self_cname))
        else:
            code.putln('%s = %s->classobj;' % (
                self.result(), Naming.generator_cname))
        # Match CPython's error for super() outside a proper class body.
        code.putln(
            'if (!%s) { PyErr_SetString(PyExc_SystemError, '
            '"super(): empty __class__ cell"); %s }' % (
                self.result(),
                code.error_goto(self.pos)))
        code.put_incref(self.result(), py_object_type)
class PyCFunctionNode(ExprNode, ModuleNameMixin):
    # Helper class used in the implementation of Python
    # functions.  Constructs a PyCFunction object
    # from a PyMethodDef struct.
    #
    # pymethdef_cname  string          PyMethodDef structure
    # binding          bool            create a CyFunction (binding semantics)
    # def_node         DefNode         the Python function node
    # module_name      EncodedString   Name of defining module
    # code_object      CodeObjectNode  the PyCodeObject creator node

    subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
                'annotations_dict']

    code_object = None
    binding = False
    def_node = None
    defaults = None            # [(arg, struct entry)] for dynamic defaults
    defaults_struct = None     # scope of the C struct holding dynamic defaults
    defaults_pyobjects = 0
    defaults_tuple = None
    defaults_kwdict = None
    annotations_dict = None

    type = py_object_type
    is_temp = 1

    specialized_cpdefs = None
    is_specialization = False

    @classmethod
    def from_defnode(cls, node, binding):
        # Fused (specialised) cpdef functions always need binding semantics.
        return cls(node.pos,
                   def_node=node,
                   pymethdef_cname=node.entry.pymethdef_cname,
                   binding=binding or node.specialized_cpdefs,
                   specialized_cpdefs=node.specialized_cpdefs,
                   code_object=CodeObjectNode(node))

    def analyse_types(self, env):
        if self.binding:
            self.analyse_default_args(env)
        return self

    def analyse_default_args(self, env):
        """
        Handle non-literal function's default arguments.
        """
        nonliteral_objects = []
        nonliteral_other = []
        default_args = []
        default_kwargs = []
        annotations = []

        # For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants
        # for default arguments to avoid the dependency on the CyFunction object as 'self' argument
        # in the underlying C function.  Basically, cpdef functions/methods are static C functions,
        # so their optional arguments must be static, too.
        # TODO: change CyFunction implementation to pass both function object and owning object for method calls
        must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope)

        for arg in self.def_node.args:
            if arg.default and not must_use_constants:
                if not arg.default.is_literal:
                    # Dynamic default: evaluated at definition time and
                    # stored in the defaults struct built below.
                    arg.is_dynamic = True
                    if arg.type.is_pyobject:
                        nonliteral_objects.append(arg)
                    else:
                        nonliteral_other.append(arg)
                else:
                    # Literal default: wrap so it is only evaluated once.
                    arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
                if arg.kw_only:
                    default_kwargs.append(arg)
                else:
                    default_args.append(arg)
            if arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
                annotations.append((arg.pos, arg.name, arg.annotation.string))

        for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
            if arg and arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
                annotations.append((arg.pos, arg.name, arg.annotation.string))

        annotation = self.def_node.return_type_annotation
        if annotation:
            self.def_node.return_type_annotation = annotation.analyse_types(env)
            annotations.append((annotation.pos, StringEncoding.EncodedString("return"),
                                annotation.string))

        if nonliteral_objects or nonliteral_other:
            # Declare a per-function C struct that carries the dynamic
            # default values on the CyFunction object.
            module_scope = env.global_scope()
            cname = module_scope.next_id(Naming.defaults_struct_prefix)
            scope = Symtab.StructOrUnionScope(cname)
            self.defaults = []
            for arg in nonliteral_objects:
                type_ = arg.type
                if type_.is_buffer:
                    type_ = type_.base
                entry = scope.declare_var(arg.name, type_, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=True)
                self.defaults.append((arg, entry))
            for arg in nonliteral_other:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=False, allow_memoryview=True)
                self.defaults.append((arg, entry))
            entry = module_scope.declare_struct_or_union(
                None, 'struct', scope, 1, None, cname=cname)
            self.defaults_struct = scope
            self.defaults_pyobjects = len(nonliteral_objects)
            for arg, entry in self.defaults:
                arg.default_value = '%s->%s' % (
                    Naming.dynamic_args_cname, entry.cname)
            self.def_node.defaults_struct = self.defaults_struct.name

        if default_args or default_kwargs:
            if self.defaults_struct is None:
                # All defaults are literals; build constant containers.
                if default_args:
                    defaults_tuple = TupleNode(self.pos, args=[
                        arg.default for arg in default_args])
                    self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
                if default_kwargs:
                    defaults_kwdict = DictNode(self.pos, key_value_pairs=[
                        DictItemNode(
                            arg.pos,
                            key=IdentifierStringNode(arg.pos, value=arg.name),
                            value=arg.default)
                        for arg in default_kwargs])
                    self.defaults_kwdict = defaults_kwdict.analyse_types(env)
            elif not self.specialized_cpdefs:
                # Fused dispatch functions do not support (dynamic) default arguments, only the specialisations do.
                if default_args:
                    defaults_tuple = DefaultsTupleNode(
                        self.pos, default_args, self.defaults_struct)
                else:
                    defaults_tuple = NoneNode(self.pos)
                if default_kwargs:
                    defaults_kwdict = DefaultsKwDictNode(
                        self.pos, default_kwargs, self.defaults_struct)
                else:
                    defaults_kwdict = NoneNode(self.pos)

                # Synthesise a '__defaults__' getter function that reads
                # the current values out of the defaults struct.
                defaults_getter = Nodes.DefNode(
                    self.pos, args=[], star_arg=None, starstar_arg=None,
                    body=Nodes.ReturnStatNode(
                        self.pos, return_type=py_object_type,
                        value=TupleNode(
                            self.pos, args=[defaults_tuple, defaults_kwdict])),
                    decorators=None,
                    name=StringEncoding.EncodedString("__defaults__"))
                # defaults getter must never live in class scopes, it's always a module function
                module_scope = env.global_scope()
                defaults_getter.analyse_declarations(module_scope)
                defaults_getter = defaults_getter.analyse_expressions(module_scope)
                defaults_getter.body = defaults_getter.body.analyse_expressions(
                    defaults_getter.local_scope)
                defaults_getter.py_wrapper_required = False
                defaults_getter.pymethdef_required = False
                self.def_node.defaults_getter = defaults_getter
        if annotations:
            annotations_dict = DictNode(self.pos, key_value_pairs=[
                DictItemNode(
                    pos, key=IdentifierStringNode(pos, value=name),
                    value=value)
                for pos, name, value in annotations])
            self.annotations_dict = annotations_dict.analyse_types(env)

    def may_be_none(self):
        return False

    gil_message = "Constructing Python function"

    def closure_result_code(self):
        # Overridden by InnerFunctionNode to pass the closure scope object.
        return "NULL"

    def generate_result_code(self, code):
        if self.binding:
            self.generate_cyfunction_code(code)
        else:
            self.generate_pycfunction_code(code)

    def generate_pycfunction_code(self, code):
        # Plain (non-binding) function: a simple PyCFunction suffices.
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
                self.result(),
                self.pymethdef_cname,
                self.closure_result_code(),
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)

    def generate_cyfunction_code(self, code):
        if self.specialized_cpdefs:
            def_node = self.specialized_cpdefs[0]
        else:
            def_node = self.def_node

        # Fused functions need their own (sub)type of CyFunction.
        if self.specialized_cpdefs or self.is_specialization:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
            constructor = "__pyx_FusedFunction_New"
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
            constructor = "__Pyx_CyFunction_New"

        if self.code_object:
            code_object_result = self.code_object.py_result()
        else:
            code_object_result = 'NULL'

        flags = []
        if def_node.is_staticmethod:
            flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
        elif def_node.is_classmethod:
            flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')

        if def_node.local_scope.parent_scope.is_c_class_scope and not def_node.entry.is_anonymous:
            flags.append('__Pyx_CYFUNCTION_CCLASS')

        if flags:
            flags = ' | '.join(flags)
        else:
            flags = '0'

        code.putln(
            '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
                self.result(),
                constructor,
                self.pymethdef_cname,
                flags,
                self.get_py_qualified_name(code),
                self.closure_result_code(),
                self.get_py_mod_name(code),
                Naming.moddict_cname,
                code_object_result,
                code.error_goto_if_null(self.result(), self.pos)))

        self.generate_gotref(code)

        if def_node.requires_classobj:
            # Register the function for later __class__ cell injection
            # (no-argument super()); see ClassCellInjectorNode.
            assert code.pyclass_stack, "pyclass_stack is empty"
            class_node = code.pyclass_stack[-1]
            code.put_incref(self.py_result(), py_object_type)
            code.putln(
                'PyList_Append(%s, %s);' % (
                    class_node.class_cell.result(),
                    self.result()))
            self.generate_giveref(code)

        if self.defaults:
            # Allocate the defaults struct on the function object and
            # fill in the dynamic default values.
            code.putln(
                'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
                    self.result(), self.defaults_struct.name,
                    self.defaults_pyobjects, code.error_goto(self.pos)))
            defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
                self.defaults_struct.name, self.result())
            for arg, entry in self.defaults:
                arg.generate_assignment_code(code, target='%s->%s' % (
                    defaults, entry.cname))

        if self.defaults_tuple:
            code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
                self.result(), self.defaults_tuple.py_result()))
        if not self.specialized_cpdefs:
            # disable introspection functions for fused dispatcher function since the user never sees it
            # TODO: this is mostly disabled because the attributes end up pointing to ones belonging
            # to the specializations - ideally this would be fixed instead
            if self.defaults_kwdict:
                code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
                    self.result(), self.defaults_kwdict.py_result()))
            if def_node.defaults_getter:
                code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
                    self.result(), def_node.defaults_getter.entry.pyfunc_cname))
            if self.annotations_dict:
                code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
                    self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
    """A PyCFunctionNode for an inner function that may need the
    enclosing closure scope passed in at creation time."""
    binding = True
    needs_closure_code = True

    def closure_result_code(self):
        if not self.needs_closure_code:
            return "NULL"
        return "((PyObject*)%s)" % Naming.cur_scope_cname
class CodeObjectNode(ExprNode):
    # Create a PyCodeObject for a CyFunction instance.
    #
    # def_node   DefNode    the Python function node
    # varnames   TupleNode  a tuple with all local variable names

    subexprs = ['varnames']
    is_temp = False
    result_code = None

    def __init__(self, def_node):
        ExprNode.__init__(self, def_node.pos, def_node=def_node)
        args = list(def_node.args)
        # if we have args/kwargs, then the first two in var_entries are those
        local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
        self.varnames = TupleNode(
            def_node.pos,
            args=[IdentifierStringNode(arg.pos, value=arg.name)
                  for arg in args + local_vars],
            is_temp=0,
            is_literal=1)

    def may_be_none(self):
        return False

    def calculate_result_code(self, code=None):
        # The code object lives as a cached module-level constant.
        if self.result_code is None:
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
        return self.result_code

    def generate_result_code(self, code):
        if self.result_code is None:
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)

        code = code.get_cached_constants_writer(self.result_code)
        if code is None:
            return  # already initialised
        code.mark_pos(self.pos)
        func = self.def_node
        func_name = code.get_py_string_const(
            func.name, identifier=True, is_str=False, unicode_value=func.name)
        # FIXME: better way to get the module file path at module init time? Encoding to use?
        file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
        file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)

        # This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836).
        flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']

        if self.def_node.star_arg:
            flags.append('CO_VARARGS')
        if self.def_node.starstar_arg:
            flags.append('CO_VARKEYWORDS')

        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,
            len(func.args) - func.num_kwonly_args,  # argcount
            func.num_posonly_args,  # posonlyargcount (Py3.8+ only)
            func.num_kwonly_args,  # kwonlyargcount (Py3 only)
            len(self.varnames.args),  # nlocals
            '|'.join(flags) or '0',  # flags
            Naming.empty_bytes,  # code
            Naming.empty_tuple,  # consts
            Naming.empty_tuple,  # names (FIXME)
            self.varnames.result(),  # varnames
            Naming.empty_tuple,  # freevars (FIXME)
            Naming.empty_tuple,  # cellvars (FIXME)
            file_path_const,  # filename
            func_name,  # name
            self.pos[1],  # firstlineno
            Naming.empty_bytes,  # lnotab
            code.error_goto_if_null(self.result_code, self.pos),
            ))
class DefaultLiteralArgNode(ExprNode):
    """Wraps a literal default value of a CyFunction argument so that
    the literal is evaluated only once, however often it is referenced."""
    subexprs = []
    is_literal = True
    is_temp = False

    def __init__(self, pos, arg):
        super(DefaultLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.type = arg.type
        self.evaluated = False

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        pass

    def generate_evaluation_code(self, code):
        # Only the first call generates code; later calls are no-ops.
        if self.evaluated:
            return
        self.arg.generate_evaluation_code(code)
        self.evaluated = True

    def result(self):
        return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
    """Reads a non-literal argument default value out of the function's
    defaults struct on the CyFunction object."""
    subexprs = []

    def __init__(self, pos, arg, defaults_struct):
        super(DefaultNonLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.defaults_struct = defaults_struct

    def analyse_types(self, env):
        self.type = self.arg.type
        self.is_temp = False
        return self

    def generate_result_code(self, code):
        pass

    def result(self):
        field_cname = self.defaults_struct.lookup(self.arg.name).cname
        return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
            self.defaults_struct.name, Naming.self_cname, field_cname)
class DefaultsTupleNode(TupleNode):
    """CyFunction's __defaults__ tuple: literal defaults are used as-is,
    dynamic ones are read from the defaults struct."""

    def __init__(self, pos, defaults, defaults_struct):
        elements = [
            arg.default if arg.default.is_literal
            else DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            for arg in defaults
        ]
        super(DefaultsTupleNode, self).__init__(pos, args=elements)

    def analyse_types(self, env, skip_children=False):
        analysed = super(DefaultsTupleNode, self).analyse_types(env, skip_children)
        return analysed.coerce_to_pyobject(env)
class DefaultsKwDictNode(DictNode):
    """CyFunction's __kwdefaults__ dict: literal defaults are used as-is,
    dynamic ones are read from the defaults struct."""

    def __init__(self, pos, defaults, defaults_struct):
        items = []
        for arg in defaults:
            key = IdentifierStringNode(arg.pos, value=arg.name)
            if arg.default.is_literal:
                value = arg.default
            else:
                value = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            items.append(DictItemNode(value.pos, key=key, value=value))
        super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
    # Lambda expression node (only used as a function reference)
    #
    # args          [CArgDeclNode]         formal arguments
    # star_arg      PyArgDeclNode or None  * argument
    # starstar_arg  PyArgDeclNode or None  ** argument
    # lambda_name   string                 a module-globally unique lambda name
    # result_expr   ExprNode
    # def_node      DefNode                the underlying function 'def' node

    child_attrs = ['def_node']

    # Lambdas are anonymous; like CPython, their function name is
    # '<lambda>'.  NOTE(review): the literal here was empty (''), which
    # would give every lambda an empty __name__ — restored to '<lambda>'.
    name = StringEncoding.EncodedString('<lambda>')

    def analyse_declarations(self, env):
        # Assign a module-wide unique id and register the underlying DefNode.
        self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
        self.def_node.no_assignment_synthesis = True
        self.def_node.pymethdef_required = True
        self.def_node.is_cyfunction = True
        self.def_node.analyse_declarations(env)
        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
        env.add_lambda_def(self.def_node)

    def analyse_types(self, env):
        self.def_node = self.def_node.analyse_expressions(env)
        return super(LambdaNode, self).analyse_types(env)

    def generate_result_code(self, code):
        self.def_node.generate_execution_code(code)
        super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
    # A generator expression, e.g.  (i for i in range(10))
    #
    # Result is a generator.
    #
    # loop      ForStatNode   the for-loop, containing a YieldExprNode
    # def_node  DefNode       the underlying generator 'def' node

    name = StringEncoding.EncodedString('genexpr')
    binding = False

    def analyse_declarations(self, env):
        self.genexpr_name = env.next_id('genexpr')
        super(GeneratorExpressionNode, self).analyse_declarations(env)
        # No pymethdef required
        self.def_node.pymethdef_required = False
        self.def_node.py_wrapper_required = False
        self.def_node.is_cyfunction = False
        # Force genexpr signature
        self.def_node.entry.signature = TypeSlots.pyfunction_noargs

    def generate_result_code(self, code):
        # Call the generator's C function directly instead of going
        # through a CyFunction object.
        code.putln(
            '%s = %s(%s); %s' % (
                self.result(),
                self.def_node.entry.pyfunc_cname,
                self.closure_result_code(),
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class YieldExprNode(ExprNode):
# Yield expression node
#
# arg ExprNode the value to return from the generator
# label_num integer yield label number
# is_yield_from boolean is a YieldFromExprNode to delegate to another generator
subexprs = ['arg']
type = py_object_type
label_num = 0
is_yield_from = False
is_await = False
in_async_gen = False
expr_keyword = 'yield'
def analyse_types(self, env):
    """Type-check the yield/await expression; coerce a non-object argument."""
    if not self.label_num or (self.is_yield_from and self.in_async_gen):
        error(self.pos, "'%s' not supported here" % self.expr_keyword)
    self.is_temp = 1
    arg = self.arg
    if arg is not None:
        self.arg = arg = arg.analyse_types(env)
        if not arg.type.is_pyobject:
            self.coerce_yield_argument(env)
    return self
def coerce_yield_argument(self, env):
    # Yielded values must be Python objects; subclasses may override
    # to allow other return types.
    self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
    if self.arg:
        self.arg.generate_evaluation_code(code)
        self.arg.make_owned_reference(code)
        # Move the yielded value into the generator's return slot.
        code.putln(
            "%s = %s;" % (
                Naming.retval_cname,
                self.arg.result_as(py_object_type)))
        self.arg.generate_post_assignment_code(code)
        self.arg.free_temps(code)
    else:
        # A bare 'yield' yields None.
        code.put_init_to_py_none(Naming.retval_cname, py_object_type)
    self.generate_yield_code(code)
def generate_yield_code(self, code):
"""
Generate the code to return the argument in 'Naming.retval_cname'
and to continue at the yield label.
"""
label_num, label_name = code.new_yield_label(
self.expr_keyword.replace(' ', '_'))
code.use_label(label_name)
saved = []
code.funcstate.closure_temps.reset()
for cname, type, manage_ref in code.funcstate.temps_in_use():
save_cname = code.funcstate.closure_temps.allocate_temp(type)
saved.append((cname, save_cname, type))
code.put_xgiveref(cname, type)
code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
code.put_xgiveref(Naming.retval_cname, py_object_type)
profile = code.globalstate.directives['profile']
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
code.put_trace_return(Naming.retval_cname,
nogil=not code.funcstate.gil_owned)
code.put_finish_refcount_context()
if code.funcstate.current_except is not None:
# inside of an except block => save away currently handled exception
code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname)
else:
# no exceptions being handled => restore exception state of caller
code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
code.putln("/* return from %sgenerator, %sing value */" % (
'async ' if self.in_async_gen else '',
'await' if self.is_await else 'yield'))
code.putln("%s->resume_label = %d;" % (
Naming.generator_cname, label_num))
if self.in_async_gen and not self.is_await:
# __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value
code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname)
else:
code.putln("return %s;" % Naming.retval_cname)
code.put_label(label_name)
for cname, save_cname, type in saved:
code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
if type.is_pyobject:
code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
code.put_xgotref(cname, type)
self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
if self.result_is_used:
self.allocate_temp_result(code)
code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
code.put_incref(self.result(), py_object_type)
def generate_sent_value_handling_code(self, code, value_cname):
code.putln(code.error_goto_if_null(value_cname, self.pos))
class _YieldDelegationExprNode(YieldExprNode):
    # Common base for 'yield from' / 'await' nodes that delegate
    # iteration to a sub-generator/awaitable via a C helper function.

    def yield_from_func(self, code):
        # Subclasses return the C name of the delegation helper function.
        raise NotImplementedError()

    def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
        # The delegation source either comes from this node's own 'arg'
        # expression or from a caller-provided C name (source_cname).
        if source_cname is None:
            self.arg.generate_evaluation_code(code)
        code.putln("%s = %s(%s, %s);" % (
            Naming.retval_cname,
            self.yield_from_func(code),
            Naming.generator_cname,
            self.arg.py_result() if source_cname is None else source_cname))
        if source_cname is None:
            self.arg.generate_disposal_code(code)
            self.arg.free_temps(code)
        elif decref_source:
            code.put_decref_clear(source_cname, py_object_type)
        code.put_xgotref(Naming.retval_cname, py_object_type)

        # Non-NULL retval => the sub-generator yielded a value: suspend here.
        code.putln("if (likely(%s)) {" % Naming.retval_cname)
        self.generate_yield_code(code)
        code.putln("} else {")
        # either error or sub-generator has normally terminated: return value => node result
        if self.result_is_used:
            self.fetch_iteration_result(code)
        else:
            self.handle_iteration_exception(code)
        code.putln("}")

    def fetch_iteration_result(self, code):
        # YieldExprNode has allocated the result temp for us
        code.putln("%s = NULL;" % self.result())
        code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result())
        self.generate_gotref(code)

    def handle_iteration_exception(self, code):
        # Swallow StopIteration (normal termination); propagate anything else.
        code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
        code.putln("if (exc_type) {")
        code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&"
                   " __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
class YieldFromExprNode(_YieldDelegationExprNode):
    # Node for a "yield from GEN" delegation expression.
    is_yield_from = True
    expr_keyword = 'yield from'

    def coerce_yield_argument(self, env):
        # Only values coercible to Python objects can be delegated to.
        # FIXME: support C arrays and C++ iterators?
        if not self.arg.type.is_string:
            error(self.pos, "yielding from non-Python object not supported")
        self.arg = self.arg.coerce_to_pyobject(env)

    def yield_from_func(self, code):
        utility = UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c")
        code.globalstate.use_utility_code(utility)
        return "__Pyx_Generator_Yield_From"
class AwaitExprNode(_YieldDelegationExprNode):
    # 'await' expression node
    #
    # arg        ExprNode  the Awaitable value to await
    # label_num  integer   yield label number
    is_await = True
    expr_keyword = 'await'

    def coerce_yield_argument(self, env):
        # FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ?
        if self.arg is not None:
            self.arg = self.arg.coerce_to_pyobject(env)

    def yield_from_func(self, code):
        utility = UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c")
        code.globalstate.use_utility_code(utility)
        return "__Pyx_Coroutine_Yield_From"
class AwaitIterNextExprNode(AwaitExprNode):
    # 'await' expression node as part of 'async for' iteration
    #
    # Breaks out of loop on StopAsyncIteration exception.

    def _generate_break(self, code):
        # Emit C code that clears a pending StopAsyncIteration (but not
        # StopIteration or GeneratorExit) and breaks out of the loop.
        code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
        code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
        code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ("
                   " exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&"
                   " __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {")
        code.putln("PyErr_Clear();")
        code.putln("break;")
        code.putln("}")

    def fetch_iteration_result(self, code):
        assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
        self._generate_break(code)
        super(AwaitIterNextExprNode, self).fetch_iteration_result(code)

    def generate_sent_value_handling_code(self, code, value_cname):
        assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
        code.putln("if (unlikely(!%s)) {" % value_cname)
        self._generate_break(code)
        # all non-break exceptions are errors, as in parent class
        code.putln(code.error_goto(self.pos))
        code.putln("}")
class GlobalsExprNode(AtomicExprNode):
    # Implements the builtin globals() via the __Pyx_Globals() helper.
    type = dict_type
    is_temp = 1
    gil_message = "Constructing globals dict"

    def analyse_types(self, env):
        env.use_utility_code(Builtin.globals_utility_code)
        return self

    def may_be_none(self):
        # __Pyx_Globals() either returns a dict or raises — never None.
        return False

    def generate_result_code(self, code):
        result = self.result()
        code.putln('%s = __Pyx_Globals(); %s' % (
            result,
            code.error_goto_if_null(result, self.pos)))
        self.generate_gotref(code)
class LocalsDictItemNode(DictItemNode):
    # Dict item for a locals() dict; values that cannot be represented as
    # Python objects are replaced by None (and filtered out later).

    def analyse_types(self, env):
        analysed_key = self.key.analyse_types(env)
        analysed_value = self.value.analyse_types(env)
        self.key = analysed_key.coerce_to_pyobject(env)
        if not analysed_value.type.can_coerce_to_pyobject(env):
            self.value = None
        else:
            self.value = analysed_value.coerce_to_pyobject(env)
        return self
class FuncLocalsExprNode(DictNode):
    """locals() inside a function: a dict literal over all named local
    entries, excluding values that cannot become Python objects."""

    def __init__(self, pos, env):
        # Sorted for deterministic dict ordering; pass a generator to
        # sorted() instead of materializing an intermediate list.
        local_vars = sorted(
            entry.name for entry in env.entries.values() if entry.name)
        items = [LocalsDictItemNode(
            pos, key=IdentifierStringNode(pos, value=var),
            value=NameNode(pos, name=var, allow_null=True))
            for var in local_vars]
        DictNode.__init__(self, pos, key_value_pairs=items,
                          exclude_null_values=True)

    def analyse_types(self, env):
        node = super(FuncLocalsExprNode, self).analyse_types(env)
        # LocalsDictItemNode.analyse_types() sets .value to None for
        # locals that cannot be coerced to Python objects; drop those.
        node.key_value_pairs = [
            item for item in node.key_value_pairs if item.value is not None]
        return node
class PyClassLocalsExprNode(AtomicExprNode):
    # locals() in a Python class body: simply exposes the class dict node.

    def __init__(self, pos, pyclass_dict):
        AtomicExprNode.__init__(self, pos)
        self.pyclass_dict = pyclass_dict

    def analyse_types(self, env):
        # Borrow the class dict's type; no temp is needed since we
        # reuse its result directly.
        self.type = self.pyclass_dict.type
        self.is_temp = False
        return self

    def result(self):
        return self.pyclass_dict.result()

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        # Nothing to generate: the class dict was evaluated elsewhere.
        pass
def LocalsExprNode(pos, scope_node, env):
    # Factory: select the locals() implementation matching the scope kind.
    if env.is_module_scope:
        return GlobalsExprNode(pos)
    elif env.is_py_class_scope:
        return PyClassLocalsExprNode(pos, scope_node.dict)
    else:
        return FuncLocalsExprNode(pos, env)
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
# Maps unary operator source text to the Python-level function used for
# compile-time evaluation (see UnopNode.calculate_constant_result() and
# UnopNode.compile_time_value() below).
compile_time_unary_operators = {
    'not': operator.not_,
    '~': operator.inv,
    '-': operator.neg,
    '+': operator.pos,
}
class UnopNode(ExprNode):
    #  Abstract base class for unary operator nodes.
    #
    #  operator      string
    #  operand       ExprNode
    #  op_func_type  CFuncType or None    type of an overloaded C++ operator, if any
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when the operand is not a pyobject.
    #      - Check operand type and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.

    subexprs = ['operand']
    infix = True
    op_func_type = None

    def calculate_constant_result(self):
        func = compile_time_unary_operators[self.operator]
        self.constant_result = func(self.operand.constant_result)

    def compile_time_value(self, denv):
        # Evaluate the operator at compile time (DEF expressions etc.).
        func = compile_time_unary_operators.get(self.operator)
        if not func:
            error(self.pos,
                  "Unary '%s' not supported in compile-time expression"
                  % self.operator)
        operand = self.operand.compile_time_value(denv)
        try:
            return func(operand)
        except Exception as e:
            self.compile_time_value_error(e)

    def infer_type(self, env):
        # Prefer an overloaded C++ operator's result type when available.
        operand_type = self.operand.infer_type(env)
        if operand_type.is_cpp_class or operand_type.is_ptr:
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                return cpp_type
        return self.infer_unop_type(env, operand_type)

    def infer_unop_type(self, env, operand_type):
        if operand_type.is_pyobject:
            return py_object_type
        else:
            return operand_type

    def may_be_none(self):
        # Unary operations on non-type builtin objects cannot yield None.
        if self.operand.type and self.operand.type.is_builtin_type:
            if self.operand.type is not type_type:
                return False
        return ExprNode.may_be_none(self)

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        if self.is_pythran_operation(env):
            self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
            self.is_temp = 1
        elif self.is_py_operation():
            self.coerce_operand_to_pyobject(env)
            self.type = py_object_type
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
        return self

    def check_const(self):
        return self.operand.check_const()

    def is_py_operation(self):
        return self.operand.type.is_pyobject or self.operand.type.is_ctuple

    def is_pythran_operation(self, env):
        np_pythran = has_np_pythran(env)
        op_type = self.operand.type
        return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)

    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()

    def is_cpp_operation(self):
        type = self.operand.type
        return type.is_cpp_class

    def coerce_operand_to_pyobject(self, env):
        self.operand = self.operand.coerce_to_pyobject(env)

    def generate_result_code(self, code):
        if self.type.is_pythran_expr:
            code.putln("// Pythran unaryop")
            code.putln("__Pyx_call_destructor(%s);" % self.result())
            code.putln("new (&%s) decltype(%s){%s%s};" % (
                self.result(),
                self.result(),
                self.operator,
                self.operand.pythran_result()))
        elif self.operand.type.is_pyobject:
            self.generate_py_operation_code(code)
        elif self.is_temp:
            if self.is_cpp_operation() and self.exception_check == '+':
                translate_cpp_exception(code, self.pos,
                    "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
                    self.result() if self.type.is_pyobject else None,
                    self.exception_value, self.in_nogil_context)
            else:
                code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result()))

    def generate_py_operation_code(self, code):
        # Python-level operation: call the PyNumber_*() helper.
        function = self.py_operation_function(code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                function,
                self.operand.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)

    def type_error(self):
        if not self.operand.type.is_error:
            error(self.pos, "Invalid operand type for '%s' (%s)" %
                  (self.operator, self.operand.type))
        self.type = PyrexTypes.error_type

    def analyse_cpp_operation(self, env, overload_check=True):
        entry = env.lookup_operator(self.operator, [self.operand])
        if overload_check and not entry:
            self.type_error()
            return
        if entry:
            self.op_func_type = entry.type
            self.exception_check = entry.type.exception_check
            self.exception_value = entry.type.exception_value
            if self.exception_check == '+':
                self.is_temp = True
                if self.exception_value is None:
                    env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        else:
            self.exception_check = ''
            self.exception_value = ''
        cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
        if overload_check and cpp_type is None:
            # Fixed: previously this formatted the *builtin* 'type' into the
            # message instead of the operand's type.
            error(self.pos, "'%s' operator not defined for %s" % (
                self.operator, self.operand.type))
            self.type_error()
            return
        self.type = cpp_type

    def calculate_operation_result_code(self, operator):
        op = self.operand.result()
        if self.operand.type.is_cyp_class:
            # cypclass values are pointers under the hood; dereference
            # so the operator applies to the object itself.
            op = "(*%s)" % op
        return "(%s%s)" % (operator, op)
class NotNode(UnopNode):
    #  'not' operator
    #
    #  operand   ExprNode
    operator = '!'
    type = PyrexTypes.c_bint_type

    def calculate_constant_result(self):
        self.constant_result = not self.operand.constant_result

    def compile_time_value(self, denv):
        value = self.operand.compile_time_value(denv)
        try:
            return not value
        except Exception as e:
            self.compile_time_value_error(e)

    def infer_unop_type(self, env, operand_type):
        # 'not' always produces a C boolean.
        return PyrexTypes.c_bint_type

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        operand_type = self.operand.type
        if operand_type.is_cpp_class and not operand_type.is_cyp_class:
            # A C++ class may provide an overloaded operator!.
            self.analyse_cpp_operation(env)
        else:
            self.operand = self.operand.coerce_to_boolean(env)
        return self

    def calculate_result_code(self):
        return "(!%s)" % self.operand.result()
class UnaryPlusNode(UnopNode):
    #  unary '+' operator
    operator = '+'

    def analyse_c_operation(self, env):
        self.type = PyrexTypes.widest_numeric_type(
            self.operand.type, PyrexTypes.c_int_type)

    def py_operation_function(self, code):
        return "PyNumber_Positive"

    def calculate_result_code(self):
        if not self.is_cpp_operation():
            # '+x' is a no-op for C numeric operands.
            return self.operand.result()
        return self.calculate_operation_result_code(self.operator)
class UnaryMinusNode(UnopNode):
    #  unary '-' operator
    operator = '-'

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_numeric:
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
        if self.type.is_complex:
            # Complex negation goes through a helper function, not '-x'.
            self.infix = False

    def py_operation_function(self, code):
        return "PyNumber_Negative"

    def calculate_result_code(self):
        if not self.infix:
            return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
        return self.calculate_operation_result_code(self.operator)

    def get_constant_c_result_code(self):
        value = self.operand.get_constant_c_result_code()
        return "(-%s)" % value if value else None
class TildeNode(UnopNode):
    #  unary '~' (bitwise invert) operator

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_int:
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()

    def py_operation_function(self, code):
        return "PyNumber_Invert"

    def calculate_result_code(self):
        return self.calculate_operation_result_code('~')
class CUnopNode(UnopNode):
    # Base class for C-only unary operators: never a Python operation.

    def is_py_operation(self):
        return False
class DereferenceNode(CUnopNode):
    #  unary '*' (pointer dereference) operator
    operator = '*'

    def infer_unop_type(self, env, operand_type):
        if not operand_type.is_ptr:
            return PyrexTypes.error_type
        return operand_type.base_type

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if not operand_type.is_ptr:
            self.type_error()
        elif env.is_cpp:
            # In C++ mode, dereferencing yields a reference type.
            self.type = PyrexTypes.CReferenceType(operand_type.base_type)
        else:
            self.type = operand_type.base_type

    def calculate_result_code(self):
        return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
    #  unary '++' / '--' operator (C-only; prefix or postfix)

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_numeric:
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_ptr:
            self.type = operand_type
        else:
            self.type_error()

    def calculate_result_code(self):
        operand_code = self.operand.result()
        if self.is_prefix:
            return "(%s%s)" % (self.operator, operand_code)
        return "(%s%s)" % (operand_code, self.operator)
def inc_dec_constructor(is_prefix, operator):
    # Factory for the parser: returns a node constructor with the
    # prefix/postfix flavour and operator text pre-bound.
    def make_node(pos, **kwds):
        return DecrementIncrementNode(
            pos, is_prefix=is_prefix, operator=operator, **kwds)
    return make_node
class AmpersandNode(CUnopNode):
    #  The C address-of operator.
    #
    #  operand  ExprNode
    operator = '&'

    def infer_unop_type(self, env, operand_type):
        return PyrexTypes.c_ptr_type(operand_type)

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        argtype = self.operand.type
        if argtype.is_cpp_class:
            if argtype.is_cyp_class:
                self.error("Cannot take address of cypclass")
            # operator& may be overloaded; no error if it is not.
            self.analyse_cpp_operation(env, overload_check=False)
        if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
            if argtype.is_memoryviewslice:
                self.error("Cannot take address of memoryview slice")
            else:
                self.error("Taking address of non-lvalue (type %s)" % argtype)
            return self
        if argtype.is_pyobject:
            self.error("Cannot take address of Python %s" % (
                "variable '%s'" % self.operand.name if self.operand.is_name else
                "object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
                "object"))
            return self
        # analyse_cpp_operation() may already have set self.type from an
        # overloaded operator&; otherwise fall back to a plain pointer type.
        if not argtype.is_cpp_class or not self.type:
            self.type = PyrexTypes.c_ptr_type(argtype)
        return self

    def check_const(self):
        return self.operand.check_const_addr()

    def error(self, mess):
        # Record the error and degrade this node to an error-typed result.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result_code = ""

    def calculate_result_code(self):
        return "(&%s)" % self.operand.result()

    def generate_result_code(self, code):
        # Only an overloaded C++ operator& with '+' exception checking
        # needs generated code; the plain case is handled inline.
        if (self.operand.type.is_cpp_class and self.exception_check == '+'):
            translate_cpp_exception(code, self.pos,
                "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
                self.result() if self.type.is_pyobject else None,
                self.exception_value, self.in_nogil_context)
# Maps unary operator text to the UnopNode subclass implementing it;
# used by unop_node() below. ('not' and the C-only operators are
# constructed elsewhere.)
unop_node_classes = {
    "+": UnaryPlusNode,
    "-": UnaryMinusNode,
    "~": TildeNode,
}
def unop_node(pos, operator, operand):
    # Construct a unary operator node of the appropriate class for the
    # given operator. Negated integer literals are folded directly.
    if operator == '-' and isinstance(operand, IntNode):
        return IntNode(pos=operand.pos,
                       value=str(-Utils.str_to_number(operand.value)),
                       longness=operand.longness, unsigned=operand.unsigned)
    # Warn about '--x' / '++x', which look like C increment/decrement.
    if operator in '+-' and isinstance(operand, UnopNode) and operand.operator == operator:
        warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
    return unop_node_classes[operator](pos,
                                       operator=operator,
                                       operand=operand)
class TypecastNode(ExprNode):
    #  C type cast
    #
    #  operand      ExprNode
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #  typecheck    boolean
    #  overloaded   boolean           a C++ conversion operator was found
    #  op_func_type CFuncType or None
    #
    #  If used from a transform, one can if wanted specify the attribute
    #  "type" directly and leave base_type and declarator to None

    subexprs = ['operand']
    base_type = declarator = type = None
    overloaded = False
    op_func_type = None

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # Resolve the target type lazily from base_type/declarator.
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        return self.type

    def analyse_types(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        if self.operand.has_constant_result():
            # Must be done after self.type is resolved.
            self.calculate_constant_result()
        if self.type.is_cfunction:
            error(self.pos,
                  "Cannot cast to a function type")
            self.type = PyrexTypes.error_type
        self.operand = self.operand.analyse_types(env)
        if self.type is PyrexTypes.c_bint_type:
            # short circuit this to a coercion
            return self.operand.coerce_to_boolean(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        if from_py and not to_py and self.operand.is_ephemeral():
            if not self.type.is_numeric and not self.type.is_cpp_class:
                error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C value -> Python object.
            if self.type is bytes_type and self.operand.type.is_int:
                return CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                self.operand = self.operand.coerce_to(self.type, env)
            else:
                if self.operand.type.is_ptr:
                    if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
                        error(self.pos, "Python objects cannot be cast from pointers of primitive types")
                else:
                    # Should this be an error?
                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
                        self.operand.type, self.type))
                self.operand = self.operand.coerce_to_simple(env)
        elif from_py and not to_py:
            # Python object -> C value.
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if not (self.type.base_type.is_void or self.type.base_type.is_struct):
                    error(self.pos, "Python objects cannot be cast to pointers of primitive types")
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
                    self.type, self.operand.type))
        elif from_py and to_py:
            if self.typecheck:
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
            elif isinstance(self.operand, SliceIndexNode):
                # This cast can influence the created type of string slices.
                self.operand = self.operand.coerce_to(self.type, env)
        elif self.type.is_complex and self.operand.type.is_complex:
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
            #self.type = self.operand.type
        elif self.operand.type.is_cpp_class:
            # Look for a user-defined C++ conversion operator to the target type.
            operator = 'operator ' + self.type.declaration_code('')
            entry = self.operand.type.scope.lookup_here(operator)
            self.overloaded = entry is not None
            if entry:
                self.op_func_type = entry.type
        if self.type.is_cyp_class:
            # An overloaded conversion needs a temp to hold the new reference.
            self.is_temp = self.overloaded
            if self.type.is_qualified_cyp_class:
                if not self.type.assignable_from(self.operand.type):
                    error(self.pos, "Cannot cast %s to %s" % (self.operand.type, self.type))
        if self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil:
            # Warn when a GIL-requiring function pointer is cast to nogil.
            op_type = self.operand.type
            if op_type.is_ptr:
                op_type = op_type.base_type
            if op_type.is_cfunction and not op_type.nogil:
                warning(self.pos,
                        "Casting a GIL-requiring function into a nogil function circumvents GIL validation", 1)
        return self

    def is_simple(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_simple()

    def is_ephemeral(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_ephemeral()

    def nonlocally_immutable(self):
        return self.is_temp or self.operand.nonlocally_immutable()

    def nogil_check(self, env):
        if self.type and self.type.is_pyobject and self.is_temp:
            self.gil_error()

    def check_const(self):
        return self.operand.check_const()

    def calculate_constant_result(self):
        self.constant_result = self.calculate_result_code(self.operand.constant_result)

    def calculate_result_code(self, operand_result = None):
        if operand_result is None:
            operand_result = self.operand.result()
        if self.type.is_complex:
            # NOTE(review): for complex targets the passed-in operand_result
            # is deliberately replaced by the operand's own result code.
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
                imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
            else:
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = "0"
            return "%s(%s, %s)" % (
                self.type.from_parts,
                real_part,
                imag_part)
        else:
            operand_type = self.operand.type
            if operand_type.is_cyp_class:
                if self.overloaded:
                    operand_result = '(*%s)' % operand_result
            # use dynamic cast when dowcasting from a base to a cypclass
            if self.type.is_cyp_class and operand_type in self.type.mro() and not self.type.same_as(operand_type):
                return self.type.dynamic_cast_code(operand_result)
            return self.type.cast_code(operand_result)

    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)

    def result_as(self, type):
        if self.type.is_pyobject and not self.is_temp:
            #  Optimise away some unnecessary casting
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)

    def generate_result_code(self, code):
        if self.is_temp:
            if self.type.is_pyobject:
                code.putln(
                    "%s = (PyObject *)%s;" % (
                        self.result(),
                        self.operand.result()))
                code.put_incref(self.result(), self.ctype())
            elif self.type.is_cyp_class:
                star = "*" if self.overloaded else ""
                operand_result = "%s%s" % (star, self.operand.result())
                # use dynamic cast when dowcasting from a base to a cypclass
                if self.operand.type in self.type.mro() and not self.type.same_as(self.operand.type):
                    code.putln(
                        "%s = dynamic_cast<%s>(%s);" % (
                            self.result(),
                            self.type.declaration_code(''),
                            operand_result))
                else:
                    code.putln(
                        "%s = (%s)(%s);" % (
                            self.result(),
                            self.type.declaration_code(''),
                            operand_result))
                code.put_incref(self.result(), self.type)
# Error messages used by CythonArrayNode.analyse_types() below.
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
             "Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
    """
    Used when a pointer of base_type is cast to a memoryviewslice with that
    base type, e.g.::

        <double[:M:1, :N]> p

    creates a fortran-contiguous cython.array.
    (NOTE(review): the example above was garbled in the original docstring
    and has been reconstructed from the surrounding logic.)

    We leave the type set to object so coercions to object are more efficient
    and less work. Acquiring a memoryviewslice from this will be just as
    efficient. ExprNode.coerce_to() will do the additional typecheck on
    self.compile_time_type

    This also handles casts of C arrays, e.g. my_c_array

    operand         ExprNode                 the thing we're casting
    base_type_node  MemoryViewSliceTypeNode  the cast expression node
    """

    subexprs = ['operand', 'shapes']

    shapes = None
    is_temp = True
    mode = "c"          # "c" or "fortran", decided from the step slices
    array_dtype = None

    shape_type = PyrexTypes.c_py_ssize_t_type

    def analyse_types(self, env):
        from . import MemoryView

        self.operand = self.operand.analyse_types(env)
        if self.array_dtype:
            array_dtype = self.array_dtype
        else:
            array_dtype = self.base_type_node.base_type_node.analyse(env)
        axes = self.base_type_node.axes

        self.type = error_type
        self.shapes = []
        ndim = len(axes)

        # Base type of the pointer or C array we are converting
        base_type = self.operand.type

        if not self.operand.type.is_ptr and not self.operand.type.is_array:
            error(self.operand.pos, ERR_NOT_POINTER)
            return self

        # Dimension sizes of C array
        array_dimension_sizes = []
        if base_type.is_array:
            while base_type.is_array:
                array_dimension_sizes.append(base_type.size)
                base_type = base_type.base_type
        elif base_type.is_ptr:
            base_type = base_type.base_type
        else:
            error(self.pos, "unexpected base type %s found" % base_type)
            return self

        if not (base_type.same_as(array_dtype) or base_type.is_void):
            error(self.operand.pos, ERR_BASE_TYPE)
            return self
        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
            error(self.operand.pos,
                  "Expected %d dimensions, array has %d dimensions" %
                  (ndim, len(array_dimension_sizes)))
            return self

        # Verify the start, stop and step values
        # In case of a C array, use the size of C array in each dimension to
        # get an automatic cast
        for axis_no, axis in enumerate(axes):
            if not axis.start.is_none:
                error(axis.start.pos, ERR_START)
                return self

            if axis.stop.is_none:
                if array_dimension_sizes:
                    dimsize = array_dimension_sizes[axis_no]
                    axis.stop = IntNode(self.pos, value=str(dimsize),
                                        constant_result=dimsize,
                                        type=PyrexTypes.c_int_type)
                else:
                    error(axis.pos, ERR_NOT_STOP)
                    return self

            axis.stop = axis.stop.analyse_types(env)
            shape = axis.stop.coerce_to(self.shape_type, env)
            if not shape.is_literal:
                shape.coerce_to_temp(env)

            self.shapes.append(shape)

            first_or_last = axis_no in (0, ndim - 1)
            if not axis.step.is_none and first_or_last:
                # '1' in the first or last dimension denotes F or C contiguity
                axis.step = axis.step.analyse_types(env)
                if (not axis.step.type.is_int and axis.step.is_literal and not
                        axis.step.type.is_error):
                    error(axis.step.pos, "Expected an integer literal")
                    return self

                if axis.step.compile_time_value(env) != 1:
                    error(axis.step.pos, ERR_STEPS)
                    return self

                if axis_no == 0:
                    self.mode = "fortran"

            elif not axis.step.is_none and not first_or_last:
                # step provided in some other dimension
                error(axis.step.pos, ERR_STEPS)
                return self

        if not self.operand.is_name:
            self.operand = self.operand.coerce_to_temp(env)

        axes = [('direct', 'follow')] * len(axes)
        if self.mode == "fortran":
            axes[0] = ('direct', 'contig')
        else:
            axes[-1] = ('direct', 'contig')

        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
        self.coercion_type.validate_memslice_dtype(self.pos)
        self.type = self.get_cython_array_type(env)
        MemoryView.use_cython_array_utility_code(env)
        env.use_utility_code(MemoryView.typeinfo_to_format_code)
        return self

    def allocate_temp_result(self, code):
        if self.temp_code:
            raise RuntimeError("temp allocated multiple times")

        self.temp_code = code.funcstate.allocate_temp(self.type, True)

    def infer_type(self, env):
        return self.get_cython_array_type(env)

    def get_cython_array_type(self, env):
        # The cython.view.array extension type defined in the cython scope.
        cython_scope = env.global_scope().context.cython_scope
        cython_scope.load_cythonscope()
        return cython_scope.viewscope.lookup("array").type

    def generate_result_code(self, code):
        from . import Buffer

        shapes = [self.shape_type.cast_code(shape.result())
                  for shape in self.shapes]
        dtype = self.coercion_type.dtype

        shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
        format_temp = code.funcstate.allocate_temp(py_object_type, True)

        itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
        type_info = Buffer.get_type_information_cname(code, dtype)

        if self.operand.type.is_ptr:
            # A NULL pointer cannot back a cython.array.
            code.putln("if (!%s) {" % self.operand.result())
            code.putln('PyErr_SetString(PyExc_ValueError,'
                       '"Cannot create cython.array from NULL pointer");')
            code.putln(code.error_goto(self.operand.pos))
            code.putln("}")

        code.putln("%s = __pyx_format_from_typeinfo(&%s); %s" % (
            format_temp,
            type_info,
            code.error_goto_if_null(format_temp, self.pos),
        ))
        code.put_gotref(format_temp, py_object_type)

        buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
        code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s); %s' % (
            shapes_temp,
            buildvalue_fmt,
            ", ".join(shapes),
            code.error_goto_if_null(shapes_temp, self.pos),
        ))
        code.put_gotref(shapes_temp, py_object_type)

        code.putln('%s = __pyx_array_new(%s, %s, PyBytes_AS_STRING(%s), (char *) "%s", (char *) %s); %s' % (
            self.result(),
            shapes_temp, itemsize, format_temp, self.mode, self.operand.result(),
            code.error_goto_if_null(self.result(), self.pos),
        ))
        self.generate_gotref(code)

        def dispose(temp):
            code.put_decref_clear(temp, py_object_type)
            code.funcstate.release_temp(temp)

        dispose(shapes_temp)
        dispose(format_temp)

    @classmethod
    def from_carray(cls, src_node, env):
        """
        Given a C array type, return a CythonArrayNode
        """
        pos = src_node.pos
        base_type = src_node.type

        none_node = NoneNode(pos)
        axes = []

        while base_type.is_array:
            axes.append(SliceNode(pos, start=none_node, stop=none_node,
                                  step=none_node))
            base_type = base_type.base_type
        # Mark the last dimension contiguous ('::1').
        axes[-1].step = IntNode(pos, value="1", is_c_literal=True)

        memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
                                                     base_type_node=base_type)
        result = CythonArrayNode(pos, base_type_node=memslicenode,
                                 operand=src_node, array_dtype=base_type)
        result = result.analyse_types(env)
        return result
class SizeofNode(ExprNode):
    #  Abstract base class for sizeof(x) expression nodes.
    type = PyrexTypes.c_size_t_type

    def check_const(self):
        # sizeof() is always a compile-time constant expression.
        return True

    def generate_result_code(self, code):
        # Nothing to emit: the C compiler evaluates sizeof() itself.
        pass
class SizeofTypeNode(SizeofNode):
    #  C sizeof function applied to a type
    #
    #  base_type   CBaseTypeNode
    #  declarator  CDeclaratorNode

    subexprs = []
    arg_type = None

    def analyse_types(self, env):
        # we may have incorrectly interpreted a dotted name as a type rather than an attribute
        # this could be better handled by more uniformly treating types as runtime-available objects
        # NOTE(review): this fallback is deliberately disabled ('if 0 and ...');
        # the code is kept for reference.
        if 0 and self.base_type.module_path:
            path = self.base_type.module_path
            obj = env.lookup(path[0])
            if obj.as_module is None:
                operand = NameNode(pos=self.pos, name=path[0])
                for attr in path[1:]:
                    operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
                operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
                node = SizeofVarNode(self.pos, operand=operand).analyse_types(env)
                return node
        if self.arg_type is None:
            base_type = self.base_type.analyse(env)
            _, arg_type = self.declarator.analyse(base_type, env)
            self.arg_type = arg_type
        self.check_type()
        return self

    def check_type(self):
        # Reject types whose C-level size is unknown or meaningless.
        arg_type = self.arg_type
        if not arg_type:
            return
        if arg_type.is_pyobject and not arg_type.is_extension_type:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)

    def calculate_result_code(self):
        if self.arg_type.is_extension_type or self.arg_type.is_cyp_class:
            # the size of the pointer is boring
            # we want the size of the actual struct
            arg_code = self.arg_type.declaration_code("", deref=1)
        else:
            arg_code = self.arg_type.empty_declaration_code()
        return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
    #  C sizeof function applied to a variable.
    #
    #  operand   ExprNode

    subexprs = ['operand']

    def analyse_types(self, env):
        # The operand may actually name a type rather than a variable;
        # if so, morph this node into a SizeofTypeNode in place.
        as_type = self.operand.analyse_as_type(env)
        if not as_type:
            # Plain variable: traditional expression analysis applies.
            self.operand = self.operand.analyse_types(env)
            return self
        if as_type.is_fused:
            as_type = as_type.specialize(env.fused_to_specific)
        self.arg_type = as_type
        self.__class__ = SizeofTypeNode
        self.check_type()
        return self

    def calculate_result_code(self):
        return "(sizeof(%s))" % self.operand.result()

    def generate_result_code(self, code):
        pass
class ConsumeNode(ExprNode):
    #  Consume expression (cypclass extension): takes ownership of an
    #  isolated cypclass reference, clearing the source.
    #
    #  operand   ExprNode
    #
    #  nogil                   boolean     used internally
    #  generate_runtime_check  boolean     used internally
    #  operand_is_named        boolean     used internally

    subexprs = ['operand']

    def infer_type(self, env):
        # The result of consume is always 'iso~'-qualified (strip any
        # existing qualifier first).
        operand_type = self.operand.infer_type(env)
        if operand_type.is_cyp_class:
            if operand_type.is_qualified_cyp_class:
                operand_type = operand_type.qual_base_type
            return PyrexTypes.cyp_class_qualified_type(operand_type, 'iso~')
        else:
            return operand_type

    def analyse_types(self, env):
        self.nogil = env.nogil
        self.operand = self.operand.analyse_types(env)
        operand_type = self.operand.type
        if not operand_type.is_cyp_class:
            error(self.pos, "Can only consume cypclass (not '%s')" % operand_type)
            self.type = PyrexTypes.error_type
            return self
        if operand_type.is_qualified_cyp_class:
            if operand_type.qualifier == 'iso->':
                error(self.pos, "Cannot consume iso->")
                self.type = PyrexTypes.error_type
                return self
            # Statically known 'iso'/'iso~' operands need no runtime
            # isolation check; other qualifiers are checked at runtime.
            self.generate_runtime_check = operand_type.qualifier not in ('iso', 'iso~')
            if operand_type.qualifier == 'iso~':
                self.type = operand_type
            else:
                self.type = PyrexTypes.cyp_class_qualified_type(operand_type.qual_base_type, 'iso~')
        else:
            self.generate_runtime_check = True
            self.type = PyrexTypes.cyp_class_qualified_type(operand_type, 'iso~')
        # Named operands (variables/attributes) must be cleared after the
        # consume, so the value has to go through a temporary.
        self.operand_is_named = self.operand.is_name or self.operand.is_attribute
        self.is_temp = self.operand_is_named or self.generate_runtime_check
        if self.is_temp:
            # We steal the reference of the operand.
            self.use_managed_ref = False
        return self

    def may_be_none(self):
        return self.operand.may_be_none()

    def is_simple(self):
        return self.operand.is_simple()

    def make_owned_reference(self, code):
        # We steal the reference of the consumed operand.
        pass

    def calculate_result_code(self):
        return self.operand.result()

    def generate_result_code(self, code):
        if self.is_temp:
            operand_result = self.operand.result()
            result_code = self.result()
            code.putln("%s = %s;" % (result_code, operand_result))
            if self.generate_runtime_check:
                # Raise TypeError when the object is not isolated
                # (CyObject_iso() is the cypclass runtime predicate).
                code.putln("if (%s != NULL && !%s->CyObject_iso()) {" % (result_code, result_code))
                if self.nogil:
                    # Must hold the GIL to set a Python exception.
                    code.putln("#ifdef WITH_THREAD")
                    code.putln("PyGILState_STATE _save = PyGILState_Ensure();")
                    code.putln("#endif")
                code.putln("PyErr_SetString(PyExc_TypeError, \"'consume' operand is not isolated\");")
                if self.nogil:
                    code.putln("#ifdef WITH_THREAD")
                    code.putln("PyGILState_Release(_save);")
                    code.putln("#endif")
                code.putln(
                    code.error_goto(self.pos))
                code.putln("}")
            # We steal the reference of the operand.
            code.putln("%s = NULL;" % operand_result)
            if self.operand.is_temp:
                # TODO: steal the reference of the operand instead
                code.put_incref(self.result(), self.type)
class TypeidNode(ExprNode):
    #  C++ typeid operator applied to a type or variable.
    #
    #  operand       ExprNode
    #  arg_type      ExprNode
    #  is_variable   boolean

    type = PyrexTypes.error_type
    subexprs = ['operand']

    arg_type = None
    is_variable = None
    is_temp = 1

    def get_type_info_type(self, env):
        # Result type is 'const std::type_info &', taken from the
        # cimported libcpp.typeinfo module.
        env_module = env
        while not env_module.is_module_scope:
            env_module = env_module.outer_scope
        typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos)
        typeinfo_entry = typeinfo_module.lookup('type_info')
        return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type))

    cpp_message = 'typeid operator'

    def analyse_types(self, env):
        self.cpp_check(env)
        type_info = self.get_type_info_type(env)
        if not type_info:
            self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator")
            return self
        self.type = type_info
        # Like sizeof(), the operand may be either a type or an expression.
        as_type = self.operand.analyse_as_specialized_type(env)
        if as_type:
            self.arg_type = as_type
            self.is_type = True
        else:
            self.arg_type = self.operand.analyse_types(env)
            self.is_type = False
            if self.arg_type.type.is_pyobject:
                self.error("Cannot use typeid on a Python object")
                return self
            elif self.arg_type.type.is_void:
                self.error("Cannot use typeid on void")
                return self
            elif not self.arg_type.type.is_complete():
                self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type)
                return self
        # typeid() can throw std::bad_typeid, hence exception conversion.
        env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        return self

    def error(self, mess):
        # Report the error and neutralise this node.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result_code = ""

    def check_const(self):
        return True

    def calculate_result_code(self):
        return self.temp_code

    def generate_result_code(self, code):
        if self.is_type:
            arg_code = self.arg_type.empty_declaration_code()
        else:
            arg_code = self.arg_type.result()
        translate_cpp_exception(code, self.pos,
            "%s = typeid(%s);" % (self.temp_code, arg_code),
            None, None, self.in_nogil_context)
class TypeofNode(ExprNode):
    #  Compile-time type of an expression, as a string.
    #
    #  operand   ExprNode
    #  literal   StringNode  # internal

    literal = None
    type = py_object_type

    subexprs = ['literal']  # 'operand' will be ignored after type analysis!

    def analyse_types(self, env):
        # Capture the analysed operand's type as a Python string constant;
        # the operand itself is not evaluated at runtime.
        self.operand = self.operand.analyse_types(env)
        type_name = StringEncoding.EncodedString(str(self.operand.type))  #self.operand.type.typeof_name())
        literal_node = StringNode(self.pos, value=type_name)
        literal_node = literal_node.analyse_types(env)
        self.literal = literal_node.coerce_to_pyobject(env)
        return self

    def analyse_as_type(self, env):
        self.operand = self.operand.analyse_types(env)
        return self.operand.type

    def may_be_none(self):
        # Always evaluates to a string object.
        return False

    def generate_evaluation_code(self, code):
        self.literal.generate_evaluation_code(code)

    def calculate_result_code(self):
        return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
try:
    # Python 3.5+: the stdlib provides the '@' operator directly.
    matmul_operator = operator.matmul
except AttributeError:
    def matmul_operator(a, b):
        # Emulate '@' on Pythons without operator.matmul.
        # BUG FIX: a.__matmul__ / b.__rmatmul__ are *bound* methods, so they
        # take a single argument; the previous code called func(a, b),
        # passing the left operand twice and raising TypeError.
        try:
            func = a.__matmul__
        except AttributeError:
            return b.__rmatmul__(a)
        return func(b)

# Maps each binary operator (as parsed) to the Python-level callable used
# to evaluate it in compile-time (DEF / IF) expressions.
compile_time_binary_operators = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
    'is': operator.is_,
    'is_not': operator.is_not,
    '+': operator.add,
    '&': operator.and_,
    '/': operator.truediv,
    '//': operator.floordiv,
    '<<': operator.lshift,
    '%': operator.mod,
    '*': operator.mul,
    '|': operator.or_,
    '**': operator.pow,
    '>>': operator.rshift,
    '-': operator.sub,
    '^': operator.xor,
    '@': matmul_operator,
    'in': lambda x, seq: x in seq,
    'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
    # Look up the compile-time evaluation function for node's operator.
    # Reports an error (and returns None) for unsupported operators.
    func = compile_time_binary_operators.get(node.operator)
    if func is None:
        error(node.pos,
              "Binary '%s' not supported in compile-time expression"
              % node.operator)
    return func
class BinopNode(ExprNode):
    #  Abstract base class for binary operator expression nodes.
    #
    #  operator       string
    #  operand1       ExprNode
    #  operand2       ExprNode
    #  op_func_type   CFuncType or None   # C++ operator overload, if any
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when neither operand is a pyobject.
    #      - Check operand types and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.

    subexprs = ['operand1', 'operand2']
    inplace = False

    op_func_type = None

    def calculate_constant_result(self):
        # Constant-fold when both operands have constant results.
        func = compile_time_binary_operators[self.operator]
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)

    def compile_time_value(self, denv):
        # Evaluate in a compile-time (DEF) environment.
        func = get_compile_time_binop(self)
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            return func(operand1, operand2)
        except Exception as e:
            self.compile_time_value_error(e)

    def infer_type(self, env):
        return self.result_type(self.operand1.infer_type(env),
                                self.operand2.infer_type(env), env)

    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.analyse_operation(env)
        return self

    def analyse_operation(self, env):
        # Dispatch on operand kinds: Pythran expression, C++/Python mix,
        # Python objects, C++ overloads, or a plain C operation.
        if self.is_pythran_operation(env):
            self.type = self.result_type(self.operand1.type,
                                         self.operand2.type, env)
            assert self.type.is_pythran_expr
            self.is_temp = 1
        elif self.is_cpp_py_operation():
            self.analyse_cpp_py_operation(env)
        elif self.is_py_operation():
            self.analyse_py_operation(env)
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)

    def is_py_operation(self):
        return self.is_py_operation_types(self.operand1.type, self.operand2.type)

    def is_py_operation_types(self, type1, type2):
        # ctuples are handled through Python tuple operations here.
        return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple

    def is_pythran_operation(self, env):
        return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)

    def is_pythran_operation_types(self, type1, type2, env):
        # Support only expr op supported_type, or supported_type op expr
        return has_np_pythran(env) and \
            (is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
            (is_pythran_expr(type1) or is_pythran_expr(type2))

    def is_cpp_py_operation(self):
        # True for "cpp_object <op> python_object" combinations.
        type1 = self.operand1.type
        type2 = self.operand2.type
        return type1.is_cpp_class and type2.is_pyobject

    def is_cpp_operation(self):
        return (self.operand1.type.is_cpp_class
                or self.operand2.type.is_cpp_class)

    def analyse_cpp_py_operation(self, env):
        # C++ operand mixed with a Python object: prefer a matching C++
        # operator overload, otherwise fall back to the Python protocol.
        operator = self.operator if not self.inplace else self.operator+"="
        entry = None
        try:
            entry = env.lookup_operator(operator, [self.operand1, self.operand2], throw=True)
        except (PyrexTypes.AmbiguousCallException, PyrexTypes.NoTypeMatchCallException):
            error(self.pos, ("Ambiguous operation with PyObject operand\n"
                             "To select one of the alternatives, explicitly cast the PyObject operand\n"
                             "To let Python handle the operation instead, cast the other operand to 'object'"
                             "\n"))
            self.type = PyrexTypes.error_type
            return
        except PyrexTypes.CallException:
            # No overload at all: handled as a Python operation below.
            pass
        if entry:
            self.analyse_cpp_operation(env)
        else:
            self.analyse_py_operation(env)

    def analyse_py_operation(self, env):
        self.coerce_operands_to_pyobjects(env)
        self.type = self.result_type(self.operand1.type,
                                     self.operand2.type, env)
        assert self.type.is_pyobject
        self.is_temp = 1

    def analyse_cpp_operation(self, env):
        # NOTE: for in-place operations this rewrites self.operator to "op=".
        operator = self.operator = self.operator if not self.inplace else self.operator+"="
        entry = env.lookup_operator(operator, [self.operand1, self.operand2])
        if not entry:
            self.type_error()
            return
        func_type = entry.type
        self.exception_check = func_type.exception_check
        self.exception_value = func_type.exception_value
        if self.exception_check == '+':
            # Used by NumBinopNodes to break up expressions involving multiple
            # operators so that exceptions can be handled properly.
            self.is_temp = 1
            if self.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        elif self.exception_check == "~":
            self.is_temp = 1
        if func_type.is_ptr:
            func_type = func_type.base_type
        self.op_func_type = func_type
        if len(func_type.args) == 1:
            # Member operator: only the RHS is an argument.
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type.as_returned_type()

    def result_type(self, type1, type2, env):
        if self.is_pythran_operation_types(type1, type2, env):
            return PythranExpr(pythran_binop_type(self.operator, type1, type2))
        if self.is_py_operation_types(type1, type2):
            # Normalise C string/unicode pointers to their Python types
            # before inferring a builtin result type.
            if type2.is_string:
                type2 = Builtin.bytes_type
            elif type2.is_pyunicode_ptr:
                type2 = Builtin.unicode_type
            if type1.is_string:
                type1 = Builtin.bytes_type
            elif type1.is_pyunicode_ptr:
                type1 = Builtin.unicode_type
            if type1.is_builtin_type or type2.is_builtin_type:
                if type1 is type2 and self.operator in '**%+|&^':
                    # FIXME: at least these operators should be safe - others?
                    return type1
                result_type = self.infer_builtin_types_operation(type1, type2)
                if result_type is not None:
                    return result_type
            return py_object_type
        elif type1.is_error or type2.is_error:
            return PyrexTypes.error_type
        else:
            return self.compute_c_result_type(type1, type2)

    def infer_builtin_types_operation(self, type1, type2):
        # Overridden by subclasses that can infer a builtin result type.
        return None

    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()

    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)

    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()

    def is_ephemeral(self):
        return (super(BinopNode, self).is_ephemeral() or
                self.operand1.is_ephemeral() or self.operand2.is_ephemeral())

    def generate_result_code(self, code):
        if self.type.is_pythran_expr:
            code.putln("// Pythran binop")
            code.putln("__Pyx_call_destructor(%s);" % self.result())
            if self.operator == '**':
                code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % (
                    self.result(),
                    self.result(),
                    self.operand1.pythran_result(),
                    self.operand2.pythran_result()))
            else:
                code.putln("new (&%s) decltype(%s){%s %s %s};" % (
                    self.result(),
                    self.result(),
                    self.operand1.pythran_result(),
                    self.operator,
                    self.operand2.pythran_result()))
        elif self.operand1.type.is_pyobject:
            function = self.py_operation_function(code)
            if self.operator == '**':
                # PyNumber_Power takes an optional third (modulo) argument.
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.operand1.py_result(),
                    self.operand2.py_result(),
                    extra_args,
                    code.error_goto_if_null(self.result(), self.pos)))
            self.generate_gotref(code)
        elif self.is_temp:
            # C++ overloaded operators with exception values are currently all
            # handled through temporaries.
            is_cpp_operation = self.is_cpp_operation()
            if is_cpp_operation and self.exception_check == '+':
                translate_cpp_exception(code, self.pos,
                                        "%s = %s;" % (self.result(), self.calculate_result_code()),
                                        self.result() if self.type.is_pyobject else None,
                                        self.exception_value, self.in_nogil_context)
            elif is_cpp_operation and self.exception_check == "~":
                checked_result_type = self.type
                exc_check_code = checked_result_type.error_condition(self.result())
                goto_error = code.error_goto_if(exc_check_code, self.pos)
                code.putln("%s = %s; %s" % (self.result(), self.calculate_result_code(), goto_error))
            else:
                code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))

    def type_error(self):
        # Only report when the operands are not already error types,
        # to avoid cascading error messages.
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                  (self.operator, self.operand1.type,
                   self.operand2.type))
        self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
    # Binary operator restricted to C-level (non-Python) operands.

    def analyse_types(self, env):
        node = BinopNode.analyse_types(self, env)
        if node.is_py_operation():
            # Python operations are not supported by this node type.
            node.type = PyrexTypes.error_type
        return node

    def py_operation_function(self, code):
        return ""

    def calculate_result_code(self):
        lhs = self.operand1.result()
        rhs = self.operand2.result()
        return "(%s %s %s)" % (lhs, self.operator, rhs)

    def compute_c_result_type(self, type1, type2):
        # Try a C++ operator overload, first on the left operand, then on
        # the right one.
        for this, other in ((type1, type2), (type2, type1)):
            if this.is_cpp_class or this.is_ptr:
                cpp_type = this.find_cpp_operation_type(self.operator, other)
                if cpp_type is not None:
                    return cpp_type
        # FIXME: do we need to handle other cases here?
        return None
def c_binop_constructor(operator):
    # Return a node factory that builds CBinopNodes for the given operator.
    def make_binop_node(pos, **operands):
        return CBinopNode(pos, operator=operator, **operands)
    return make_binop_node
class NumBinopNode(BinopNode):
    #  Binary operation taking numeric arguments.
    #
    #  infix              boolean   # emit "a op b" rather than "func(a, b)"
    #  overflow_check     boolean
    #  overflow_bit_node  NumBinopNode or None

    infix = True
    overflow_check = False
    overflow_bit_node = None

    def analyse_c_operation(self, env):
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
            return
        if self.type.is_complex:
            # Complex arithmetic goes through helper functions, not infix.
            self.infix = False
        if (self.type.is_int
                and env.directives['overflowcheck']
                and self.operator in self.overflow_op_names):
            if (self.operator in ('+', '*')
                    and self.operand1.has_constant_result()
                    and not self.operand2.has_constant_result()):
                # Commutative op: put the constant on the RHS so the
                # const_rhs specialisation below applies.
                self.operand1, self.operand2 = self.operand2, self.operand1
            self.overflow_check = True
            self.overflow_fold = env.directives['overflowcheck.fold']
            self.func = self.type.overflow_check_binop(
                self.overflow_op_names[self.operator],
                env,
                const_rhs = self.operand2.has_constant_result())
            self.is_temp = True
        if not self.infix or (type1.is_numeric and type2.is_numeric):
            self.operand1 = self.operand1.coerce_to(self.type, env)
            self.operand2 = self.operand2.coerce_to(self.type, env)

    def compute_c_result_type(self, type1, type2):
        if self.c_types_okay(type1, type2):
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
            if widest_type is PyrexTypes.c_bint_type:
                if self.operator not in '|^&':
                    # False + False == 0 # not False!
                    widest_type = PyrexTypes.c_int_type
            else:
                widest_type = PyrexTypes.widest_numeric_type(
                    widest_type, PyrexTypes.c_int_type)
            return widest_type
        else:
            # Non-numeric operands: look for a C++ operator overload.
            cpp_type = None
            operator = self.operator if not self.inplace else self.operator+"="
            if type1.is_cpp_class or type1.is_ptr:
                cpp_type = type1.find_cpp_operation_type(operator, type2)
            if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
                cpp_type = type2.find_cpp_operation_type(operator, type1)
            # FIXME: do we need to handle other cases here?
            if self.inplace and type1.is_subclass(cpp_type):
                # In-place op on a subclass keeps the LHS type.
                cpp_type = type1
            return cpp_type

    def may_be_none(self):
        if self.type and self.type.is_builtin_type:
            # if we know the result type, we know the operation, so it can't be None
            return False
        type1 = self.operand1.type
        type2 = self.operand2.type
        if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
            # XXX: I can't think of any case where a binary operation
            # on builtin types evaluates to None - add a special case
            # here if there is one.
            return False
        return super(NumBinopNode, self).may_be_none()

    def get_constant_c_result_code(self):
        value1 = self.operand1.get_constant_c_result_code()
        value2 = self.operand2.get_constant_c_result_code()
        if value1 and value2:
            return "(%s %s %s)" % (value1, self.operator, value2)
        else:
            return None

    def c_types_okay(self, type1, type2):
        #print "NumBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_numeric or type1.is_enum) \
            and (type2.is_numeric or type2.is_enum)

    def generate_evaluation_code(self, code):
        if self.overflow_check:
            # The overflow helper sets this flag instead of raising directly.
            self.overflow_bit_node = self
            self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
            code.putln("%s = 0;" % self.overflow_bit)
        super(NumBinopNode, self).generate_evaluation_code(code)
        if self.overflow_check:
            code.putln("if (unlikely(%s)) {" % self.overflow_bit)
            code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
            code.putln(code.error_goto(self.pos))
            code.putln("}")
            code.funcstate.release_temp(self.overflow_bit)

    def calculate_result_code(self):
        if self.overflow_bit_node is not None:
            return "%s(%s, %s, &%s)" % (
                self.func,
                self.operand1.result(),
                self.operand2.result(),
                self.overflow_bit_node.overflow_bit)
        elif self.type.is_cpp_class or self.infix:
            if is_pythran_expr(self.type):
                result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
            else:
                result1, result2 = self.operand1.result(), self.operand2.result()
            if self.operand1.type.is_cyp_class:
                # cypclass values are held by pointer; dereference the LHS.
                result1 = "(*%s)" % result1
            return "(%s %s %s)" % (result1, self.operator, result2)
        else:
            # Non-infix (e.g. complex) operations go through a helper function.
            func = self.type.binary_op(self.operator)
            if func is None:
                error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
            return "%s(%s, %s)" % (
                func,
                self.operand1.result(),
                self.operand2.result())

    def is_py_operation_types(self, type1, type2):
        return (type1.is_unicode_char or
                type2.is_unicode_char or
                BinopNode.is_py_operation_types(self, type1, type2))

    def py_operation_function(self, code):
        function_name = self.py_functions[self.operator]
        if self.inplace:
            function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
        return function_name

    # C-API function used for each operator on Python objects.
    py_functions = {
        "|":        "PyNumber_Or",
        "^":        "PyNumber_Xor",
        "&":        "PyNumber_And",
        "<<":       "PyNumber_Lshift",
        ">>":       "PyNumber_Rshift",
        "+":        "PyNumber_Add",
        "-":        "PyNumber_Subtract",
        "*":        "PyNumber_Multiply",
        "@":        "__Pyx_PyNumber_MatrixMultiply",
        "/":        "__Pyx_PyNumber_Divide",
        "//":       "PyNumber_FloorDivide",
        "%":        "PyNumber_Remainder",
        "**":       "PyNumber_Power",
    }

    # Operators that have overflow-checking helper implementations.
    overflow_op_names = {
        "+": "add",
        "-": "sub",
        "*": "mul",
        "<<": "lshift",
    }
class IntBinopNode(NumBinopNode):
    #  Binary operation taking integer arguments.

    def c_types_okay(self, type1, type2):
        # Both operands must be C integers or enums.
        return ((type1.is_int or type1.is_enum)
                and (type2.is_int or type2.is_enum))
class AddNode(NumBinopNode):
    #  '+' operator.

    def is_py_operation_types(self, type1, type2):
        # C string + C string concatenation is done at the Python level.
        if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)

    def infer_builtin_types_operation(self, type1, type2):
        # b'abc' + 'abc' raises an exception in Py3,
        # so we can safely infer the Py2 type for bytes here
        string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
        if type1 in string_types and type2 in string_types:
            # The result type is the "wider" of the two (ordered as above).
            return string_types[max(string_types.index(type1),
                                    string_types.index(type2))]
        return None

    def compute_c_result_type(self, type1, type2):
        #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
        # Pointer arithmetic: ptr + int keeps the pointer type.
        if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
            return type1
        elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
            return type2
        else:
            return NumBinopNode.compute_c_result_type(
                self, type1, type2)

    def py_operation_function(self, code):
        # Select a specialised concat helper for str/unicode additions;
        # otherwise fall back to the generic PyNumber_Add.
        type1, type2 = self.operand1.type, self.operand2.type
        func = None
        if type1 is unicode_type or type2 is unicode_type:
            if type1 in (unicode_type, str_type) and type2 in (unicode_type, str_type):
                is_unicode_concat = True
            elif isinstance(self.operand1, FormattedValueNode) or isinstance(self.operand2, FormattedValueNode):
                # Assume that even if we don't know the second type, it's going to be a string.
                is_unicode_concat = True
            else:
                # Operation depends on the second type.
                is_unicode_concat = False

            if is_unicode_concat:
                if self.inplace or self.operand1.is_temp:
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("UnicodeConcatInPlace", "ObjectHandling.c"))
                func = '__Pyx_PyUnicode_Concat'
        elif type1 is str_type and type2 is str_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("StrConcatInPlace", "ObjectHandling.c"))
            func = '__Pyx_PyStr_Concat'

        if func:
            # any necessary utility code will be got by "NumberAdd" in generate_evaluation_code
            if self.inplace or self.operand1.is_temp:
                func += 'InPlace'  # upper case to indicate unintuitive macro
            if self.operand1.may_be_none() or self.operand2.may_be_none():
                func += 'Safe'
            return func

        return super(AddNode, self).py_operation_function(code)
class SubNode(NumBinopNode):
    #  '-' operator.

    def compute_c_result_type(self, type1, type2):
        # Pointer arithmetic: ptr - int keeps the pointer type,
        # ptr - ptr yields ptrdiff_t.
        if type1.is_ptr or type1.is_array:
            if type2.is_int or type2.is_enum:
                return type1
            if type2.is_ptr or type2.is_array:
                return PyrexTypes.c_ptrdiff_t_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class MulNode(NumBinopNode):
    #  '*' operator.

    def is_py_operation_types(self, type1, type2):
        # String repetition (string * int) is a Python-level operation.
        if ((type1.is_string and type2.is_int) or
                (type2.is_string and type1.is_int)):
            return 1
        return NumBinopNode.is_py_operation_types(self, type1, type2)

    def infer_builtin_types_operation(self, type1, type2):
        # let's assume that whatever builtin type you multiply a string with
        # will either return a string of the same type or fail with an exception
        string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
        for this, other in ((type1, type2), (type2, type1)):
            if this in string_types and other.is_builtin_type:
                return this
        # multiplication of containers/numbers with an integer value
        # always (?) returns the same type
        if type1.is_int:
            return type2
        if type2.is_int:
            return type1
        return None
class MatMultNode(NumBinopNode):
    #  '@' (matrix multiplication) operator.

    def is_py_operation_types(self, type1, type2):
        # '@' is always dispatched through the Python protocol.
        return True

    def generate_evaluation_code(self, code):
        # __Pyx_PyNumber_MatrixMultiply is defined by this utility code.
        code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
        super(MatMultNode, self).generate_evaluation_code(code)
class DivNode(NumBinopNode):
    #  '/' or '//' operator.
    #
    #  cdivision           boolean or None   # None => decided from directives
    #  truedivision        boolean or None   # == "unknown" if operator == '/'
    #  ctruedivision       boolean
    #  cdivision_warnings  boolean
    #  zerodivision_check  boolean or None

    cdivision = None
    truedivision = None   # == "unknown" if operator == '/'
    ctruedivision = False
    cdivision_warnings = False
    zerodivision_check = None

    def find_compile_time_binary_operator(self, op1, op2):
        func = compile_time_binary_operators[self.operator]
        if self.operator == '/' and self.truedivision is None:
            # => true div for floats, floor div for integers
            if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types):
                func = compile_time_binary_operators['//']
        return func

    def calculate_constant_result(self):
        op1 = self.operand1.constant_result
        op2 = self.operand2.constant_result
        func = self.find_compile_time_binary_operator(op1, op2)
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)

    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            func = self.find_compile_time_binary_operator(
                operand1, operand2)
            return func(operand1, operand2)
        except Exception as e:
            self.compile_time_value_error(e)

    def _check_truedivision(self, env):
        # 'cdivision' directive forces C semantics (no true division).
        if self.cdivision or env.directives['cdivision']:
            self.ctruedivision = False
        else:
            self.ctruedivision = self.truedivision

    def infer_type(self, env):
        self._check_truedivision(env)
        return self.result_type(
            self.operand1.infer_type(env),
            self.operand2.infer_type(env), env)

    def analyse_operation(self, env):
        self._check_truedivision(env)
        NumBinopNode.analyse_operation(self, env)
        if self.is_cpp_operation():
            # C++ operator overloads get plain C division semantics.
            self.cdivision = True
        if not self.type.is_pyobject:
            # Decide whether a runtime zero-divisor check is needed.
            self.zerodivision_check = (
                self.cdivision is None and not env.directives['cdivision']
                and (not self.operand2.has_constant_result() or
                     self.operand2.constant_result == 0))
            if self.zerodivision_check or env.directives['cdivision_warnings']:
                # Need to check ahead of time to warn or raise zero division error
                self.operand1 = self.operand1.coerce_to_simple(env)
                self.operand2 = self.operand2.coerce_to_simple(env)

    def compute_c_result_type(self, type1, type2):
        if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class:
            if not type1.is_float and not type2.is_float:
                # int / int under true division yields at least a double.
                widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
                widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
                return widest_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)

    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float division"

    def generate_evaluation_code(self, code):
        if not self.type.is_pyobject and not self.type.is_complex:
            if self.cdivision is None:
                # C semantics are safe for floats and unsigned types.
                self.cdivision = (
                    code.globalstate.directives['cdivision']
                    or self.type.is_float
                    or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed)
                )
            if not self.cdivision:
                # Python-style floor division helper for signed ints.
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)

    def generate_div_warning_code(self, code):
        # Emit runtime checks: ZeroDivisionError, INT_MIN / -1 overflow,
        # and the optional cdivision warning.
        in_nogil = self.in_nogil_context
        if not self.type.is_pyobject:
            if self.zerodivision_check:
                if not self.infix:
                    zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
                else:
                    zero_test = "%s == 0" % self.operand2.result()
                code.putln("if (unlikely(%s)) {" % zero_test)
                if in_nogil:
                    code.put_ensure_gil()
                code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
                if in_nogil:
                    code.put_release_ensured_gil()
                code.putln(code.error_goto(self.pos))
                code.putln("}")
                if self.type.is_int and self.type.signed and self.operator != '%':
                    # Negating the most negative int overflows: check for
                    # operand2 == -1 combined with operand1 == INT_MIN.
                    code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c"))
                    if self.operand2.type.signed == 2:
                        # explicitly signed, no runtime check needed
                        minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
                    else:
                        type_of_op2 = self.operand2.type.empty_declaration_code()
                        minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
                            type_of_op2, self.operand2.result(), type_of_op2)
                    code.putln("else if (sizeof(%s) == sizeof(long) && %s "
                               " && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
                               self.type.empty_declaration_code(),
                               minus1_check,
                               self.operand1.result()))
                    if in_nogil:
                        code.put_ensure_gil()
                    code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
                    if in_nogil:
                        code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
            if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
                # Warn when C and Python division would disagree
                # (operands of opposite sign).
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("CDivisionWarning", "CMath.c"))
                code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
                    self.operand1.result(),
                    self.operand2.result()))
                warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % {
                    'FILENAME': Naming.filename_cname,
                    'LINENO': Naming.lineno_cname,
                }

                if in_nogil:
                    result_code = 'result'
                    code.putln("int %s;" % result_code)
                    code.put_ensure_gil()
                    code.putln(code.set_error_info(self.pos, used=True))
                    code.putln("%s = %s;" % (result_code, warning_code))
                    code.put_release_ensured_gil()
                else:
                    result_code = warning_code
                    code.putln(code.set_error_info(self.pos, used=True))

                code.put("if (unlikely(%s)) " % result_code)
                code.put_goto(code.error_label)
                code.putln("}")

    def calculate_result_code(self):
        if self.type.is_complex or self.is_cpp_operation():
            return NumBinopNode.calculate_result_code(self)
        elif self.type.is_float and self.operator == '//':
            return "floor(%s / %s)" % (
                self.operand1.result(),
                self.operand2.result())
        elif self.truedivision or self.cdivision:
            op1 = self.operand1.result()
            op2 = self.operand2.result()
            if self.truedivision:
                # Cast to the (float) result type so '/' is true division.
                if self.type != self.operand1.type:
                    op1 = self.type.cast_code(op1)
                if self.type != self.operand2.type:
                    op2 = self.type.cast_code(op2)
            if self.operand1.type.is_cyp_class:
                # cypclass values are held by pointer; dereference the LHS.
                op1 = "*(%s)" % op1
            return "(%s / %s)" % (op1, op2)
        else:
            # Python-style floor division helper (handles negative operands).
            return "__Pyx_div_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())
_find_formatting_types = re.compile(
br"%"
br"(?:%|" # %%
br"(?:\([^)]+\))?" # %(name)
br"[-+#,0-9 ]*([a-z])" # %.2f etc.
br")").findall
# These format conversion types can never trigger a Unicode string conversion in Py2.
_safe_bytes_formats = set([
# Excludes 's' and 'r', which can generate non-bytes strings.
b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a',
])
class ModNode(DivNode):
    #  '%' operator.
    #  Covers both numeric modulo and Python string formatting
    #  ("..." % args), which is why string types force a Python-level
    #  operation below.

    def is_py_operation_types(self, type1, type2):
        # String operands mean '%' is string formatting, which is always
        # a Python operation; otherwise defer to the numeric base check.
        return (type1.is_string
            or type2.is_string
            or NumBinopNode.is_py_operation_types(self, type1, type2))

    def infer_builtin_types_operation(self, type1, type2):
        # Infer the result type of builtin-typed '%' where it is safe to do so.
        # b'%s' % xyz  raises an exception in Py3<3.5, so it's safe to infer the type for Py2 and later Py3's.
        if type1 is unicode_type:
            # None + xyz  may be implemented by RHS
            if type2.is_builtin_type or not self.operand1.may_be_none():
                return type1
        elif type1 in (bytes_type, str_type, basestring_type):
            if type2 is unicode_type:
                # formatting with a unicode argument promotes the result to unicode
                return type2
            elif type2.is_numeric:
                # numeric conversions keep the format string's own type
                return type1
            elif self.operand1.is_string_literal:
                if type1 is str_type or type1 is bytes_type:
                    # only safe conversion characters guarantee the literal's type
                    if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
                        return type1
                return basestring_type
            elif type1 is bytes_type and not type2.is_builtin_type:
                return None   # RHS might implement the '%' operator differently in Py3
            else:
                return basestring_type  # either str or unicode, can't tell
        return None

    def zero_division_message(self):
        # Message used for the ZeroDivisionError raised at runtime.
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float divmod()"

    def analyse_operation(self, env):
        DivNode.analyse_operation(self, env)
        if not self.type.is_pyobject:
            if self.cdivision is None:
                # unsigned types cannot differ from C semantics, so cdivision is free
                self.cdivision = env.directives['cdivision'] or not self.type.signed
            if not self.cdivision and not self.type.is_int and not self.type.is_float:
                error(self.pos, "mod operator not supported for type '%s'" % self.type)

    def generate_evaluation_code(self, code):
        # Python-semantics modulo on C types needs a helper function.
        if not self.type.is_pyobject and not self.cdivision:
            if self.type.is_int:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
            else:  # float
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
                        self.type, math_h_modifier=self.type.math_h_modifier))
        # NOTE: skipping over DivNode here
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)

    def calculate_result_code(self):
        if self.cdivision:
            if self.type.is_float:
                # C float modulo is fmod()/fmodf()/fmodl()
                return "fmod%s(%s, %s)" % (
                    self.type.math_h_modifier,
                    self.operand1.result(),
                    self.operand2.result())
            else:
                op1 = self.operand1.result()
                # cypclass operands are held by pointer — dereference first
                if self.operand1.type.is_cyp_class:
                    op1 = "(*%s)" % op1
                return "(%s %s %s)" % (
                    op1,
                    self.operator,
                    self.operand2.result())
        else:
            # helper function implementing Python's modulo semantics
            return "__Pyx_mod_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())

    def py_operation_function(self, code):
        # Pick the C helper for Python-level '%' (string formatting aware).
        type1, type2 = self.operand1.type, self.operand2.type
        # ("..." % x)  must call "x.__rmod__()" for string subtypes.
        if type1 is unicode_type:
            if self.operand1.may_be_none() or (
                    type2.is_extension_type and type2.subtype_of(type1) or
                    type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
                # None LHS or possible string-subtype RHS: use the safe variant
                return '__Pyx_PyUnicode_FormatSafe'
            else:
                return 'PyUnicode_Format'
        elif type1 is str_type:
            if self.operand1.may_be_none() or (
                    type2.is_extension_type and type2.subtype_of(type1) or
                    type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
                return '__Pyx_PyString_FormatSafe'
            else:
                return '__Pyx_PyString_Format'
        return super(ModNode, self).py_operation_function(code)
class PowNode(NumBinopNode):
    #  '**' operator.
    #  pow_func  string  name of the C function/helper implementing the power
    #                    operation for the chosen C result type

    def analyse_c_operation(self, env):
        # Choose the C-level power implementation and record it in self.pow_func.
        NumBinopNode.analyse_c_operation(self, env)
        if self.type.is_complex:
            if self.type.real_type.is_float:
                self.operand1 = self.operand1.coerce_to(self.type, env)
                self.operand2 = self.operand2.coerce_to(self.type, env)
                self.pow_func = self.type.binary_op('**')
            else:
                error(self.pos, "complex int powers not supported")
                self.pow_func = ""
        elif self.type.is_float:
            # pow()/powf()/powl() from math.h
            self.pow_func = "pow" + self.type.math_h_modifier
        elif self.type.is_int:
            # generated utility function, one specialization per int type
            self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
            env.use_utility_code(
                UtilityCode.load_cached("IntPow", "CMath.c").specialize(
                    func_name=self.pow_func,
                    type=self.type.empty_declaration_code(),
                    signed=self.type.signed and 1 or 0))
        elif not self.type.is_error:
            error(self.pos, "got unexpected types for C power operator: %s, %s" %
                  (self.operand1.type, self.operand2.type))

    def compute_c_result_type(self, type1, type2):
        c_result_type = super(PowNode, self).compute_c_result_type(type1, type2)
        # A constant negative exponent makes the result fractional, so widen
        # the result type to at least double.
        if isinstance(self.operand2.constant_result, _py_int_types) and self.operand2.constant_result < 0:
            c_result_type = PyrexTypes.widest_numeric_type(c_result_type, PyrexTypes.c_double_type)
        return c_result_type

    def calculate_result_code(self):
        # Work around MSVC overloading ambiguity.
        def typecast(operand):
            if self.type == operand.type:
                return operand.result()
            else:
                return self.type.cast_code(operand.result())
        op1 = typecast(self.operand1)
        # cypclass operands are held by pointer — dereference first
        if self.operand1.type.is_cyp_class:
            op1 = "(*%s)" % op1
        return "%s(%s, %s)" % (
            self.pow_func,
            op1,
            typecast(self.operand2))

    def py_operation_function(self, code):
        # Optimize "2 ** x" for Python objects via a dedicated helper.
        if (self.type.is_pyobject and
                self.operand1.constant_result == 2 and
                isinstance(self.operand1.constant_result, _py_int_types) and
                self.operand2.type is py_object_type):
            code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
            if self.inplace:
                return '__Pyx_PyNumber_InPlacePowerOf2'
            else:
                return '__Pyx_PyNumber_PowerOf2'
        return super(PowNode, self).py_operation_function(code)
class BoolBinopNode(ExprNode):
    """
    Short-circuiting boolean operation.

    Note that this node provides the same code generation method as
    BoolBinopResultNode to simplify expression nesting.

    operator  string                             "and"/"or"
    operand1  BoolBinopNode/BoolBinopResultNode  left operand
    operand2  BoolBinopNode/BoolBinopResultNode  right operand
    """
    subexprs = ['operand1', 'operand2']
    is_temp = True
    operator = None
    operand1 = None
    operand2 = None

    def infer_type(self, env):
        # The result may come from either operand, so span both types.
        type1 = self.operand1.infer_type(env)
        type2 = self.operand2.infer_type(env)
        return PyrexTypes.independent_spanning_type(type1, type2)

    def may_be_none(self):
        if self.operator == 'or':
            # "x or y" only yields y's value when x is false, and a false
            # x is never returned as None-valued result here.
            return self.operand2.may_be_none()
        else:
            return self.operand1.may_be_none() or self.operand2.may_be_none()

    def calculate_constant_result(self):
        operand1 = self.operand1.constant_result
        operand2 = self.operand2.constant_result
        if self.operator == 'and':
            self.constant_result = operand1 and operand2
        else:
            self.constant_result = operand1 or operand2

    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        if self.operator == 'and':
            return operand1 and operand2
        else:
            return operand1 or operand2

    def is_ephemeral(self):
        return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()

    def analyse_types(self, env):
        # Note: we do not do any coercion here as we most likely do not know the final type anyway.
        # We even accept to set self.type to ErrorType if both operands do not have a spanning type.
        # The coercion to the final type and to a "simple" value is left to coerce_to().
        operand1 = self.operand1.analyse_types(env)
        operand2 = self.operand2.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(
            operand1.type, operand2.type)
        self.operand1 = self._wrap_operand(operand1, env)
        self.operand2 = self._wrap_operand(operand2, env)
        return self

    def _wrap_operand(self, operand, env):
        # Ensure every operand is a nested BoolBinopNode or a
        # BoolBinopResultNode so that code generation can recurse uniformly.
        if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
            operand = BoolBinopResultNode(operand, self.type, env)
        return operand

    def wrap_operands(self, env):
        """
        Must get called by transforms that want to create a correct BoolBinopNode
        after the type analysis phase.
        """
        self.operand1 = self._wrap_operand(self.operand1, env)
        self.operand2 = self._wrap_operand(self.operand2, env)

    def coerce_to_boolean(self, env):
        return self.coerce_to(PyrexTypes.c_bint_type, env)

    def coerce_to(self, dst_type, env):
        # Coercion is pushed down into both operands; a new node is built
        # rather than mutating self.
        operand1 = self.operand1.coerce_to(dst_type, env)
        operand2 = self.operand2.coerce_to(dst_type, env)
        return BoolBinopNode.from_node(
            self, type=dst_type,
            operator=self.operator,
            operand1=operand1, operand2=operand2)

    def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
        # Recursively emit the short-circuit chain.  'my_label' is where
        # evaluation continues when operand1 does not decide the result.
        code.mark_pos(self.pos)

        outer_labels = (and_label, or_label)
        if self.operator == 'and':
            my_label = and_label = code.new_label('next_and')
        else:
            my_label = or_label = code.new_label('next_or')
        self.operand1.generate_bool_evaluation_code(
            code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)

        # restore the outer chain's labels for the second operand
        and_label, or_label = outer_labels

        code.put_label(my_label)
        self.operand2.generate_bool_evaluation_code(
            code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)

    def generate_evaluation_code(self, code):
        # Top-level entry point: sets up the end label and delegates to the
        # recursive label-threading generator above.
        self.allocate_temp_result(code)
        result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
        or_label = and_label = None
        end_label = code.new_label('bool_binop_done')
        self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
        code.put_label(end_label)

    gil_message = "Truth-testing Python object"

    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()

    def generate_subexpr_disposal_code(self, code):
        pass  # nothing to do here, all done in generate_evaluation_code()

    def free_subexpr_temps(self, code):
        pass  # nothing to do here, all done in generate_evaluation_code()

    def generate_operand1_test(self, code):
        #  Generate code to test the truth of the first operand.
        if self.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(
                PyrexTypes.c_bint_type, manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.operand1.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.operand1.result()
        # second element tells the caller whether test_result is a temp to release
        return (test_result, self.type.is_pyobject)
class BoolBinopResultNode(ExprNode):
    """
    Intermediate result of a short-circuiting and/or expression.
    Tests the result for 'truthiness' and takes care of coercing the final result
    of the overall expression to the target type.

    Note that this node provides the same code generation method as
    BoolBinopNode to simplify expression nesting.

    arg      ExprNode  the argument to test
    value    ExprNode  the coerced result value node
    """
    subexprs = ['arg', 'value']
    is_temp = True
    arg = None
    value = None

    def __init__(self, arg, result_type, env):
        # using 'arg' multiple times, so it must be a simple/temp value
        arg = arg.coerce_to_simple(env)
        # wrap in ProxyNode, in case a transform wants to replace self.arg later
        arg = ProxyNode(arg)
        super(BoolBinopResultNode, self).__init__(
            arg.pos, arg=arg, type=result_type,
            value=CloneNode(arg).coerce_to(result_type, env))

    def coerce_to_boolean(self, env):
        return self.coerce_to(PyrexTypes.c_bint_type, env)

    def coerce_to(self, dst_type, env):
        # unwrap, coerce, rewrap
        arg = self.arg.arg
        if dst_type is PyrexTypes.c_bint_type:
            arg = arg.coerce_to_boolean(env)
        # TODO: unwrap more coercion nodes?
        return BoolBinopResultNode(arg, dst_type, env)

    def nogil_check(self, env):
        # let's leave all errors to BoolBinopNode
        pass

    def generate_operand_test(self, code):
        #  Generate code to test the truth of the first operand.
        if self.arg.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(
                PyrexTypes.c_bint_type, manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.arg.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.arg.result()
        # second element tells the caller whether test_result is a temp to release
        return (test_result, self.arg.type.is_pyobject)

    def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
        code.mark_pos(self.pos)

        # x => x
        # x and ... or ... => next 'and' / 'or'
        # False ... or x => next 'or'
        # True and x => next 'and'
        # True or x => True (operand)

        self.arg.generate_evaluation_code(code)
        if and_label or or_label:
            test_result, uses_temp = self.generate_operand_test(code)
            if uses_temp and (and_label and or_label):
                # cannot become final result => free early
                # disposal: uses_temp and (and_label and or_label)
                self.arg.generate_disposal_code(code)
            sense = '!' if or_label else ''
            code.putln("if (%s%s) {" % (sense, test_result))
            if uses_temp:
                code.funcstate.release_temp(test_result)
            if not uses_temp or not (and_label and or_label):
                # disposal: (not uses_temp) or {not (and_label and or_label) [if]}
                self.arg.generate_disposal_code(code)

            if or_label and or_label != fall_through:
                # value is false => short-circuit to next 'or'
                code.put_goto(or_label)
            if and_label:
                # value is true => go to next 'and'
                if or_label:
                    code.putln("} else {")
                    if not uses_temp:
                        # disposal: (not uses_temp) and {(and_label and or_label) [else]}
                        self.arg.generate_disposal_code(code)
                if and_label != fall_through:
                    code.put_goto(and_label)

        if not and_label or not or_label:
            # if no next 'and' or 'or', we provide the result
            if and_label or or_label:
                code.putln("} else {")
            self.value.generate_evaluation_code(code)
            self.value.make_owned_reference(code)
            code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
            self.value.generate_post_assignment_code(code)
            # disposal: {not (and_label and or_label) [else]}
            self.arg.generate_disposal_code(code)
            self.value.free_temps(code)
            if end_label != fall_through:
                code.put_goto(end_label)

        if and_label or or_label:
            code.putln("}")
        self.arg.free_temps(code)

    def analyse_types(self, env):
        # already fully analysed in __init__()
        return self
class CondExprNode(ExprNode):
    #  Short-circuiting conditional expression.
    #  (Python's "true_val if test else false_val")
    #
    #  test        ExprNode
    #  true_val    ExprNode
    #  false_val   ExprNode

    true_val = None
    false_val = None
    is_temp = True

    subexprs = ['test', 'true_val', 'false_val']

    def type_dependencies(self, env):
        return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)

    def infer_type(self, env):
        # Result may come from either branch, so span both inferred types.
        return PyrexTypes.independent_spanning_type(
            self.true_val.infer_type(env),
            self.false_val.infer_type(env))

    def calculate_constant_result(self):
        if self.test.constant_result:
            self.constant_result = self.true_val.constant_result
        else:
            self.constant_result = self.false_val.constant_result

    def is_ephemeral(self):
        return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()

    def analyse_types(self, env):
        self.test = self.test.analyse_types(env).coerce_to_boolean(env)
        self.true_val = self.true_val.analyse_types(env)
        self.false_val = self.false_val.analyse_types(env)
        return self.analyse_result_type(env)

    def analyse_result_type(self, env):
        # Compute the spanning result type and coerce both branches to it.
        self.type = PyrexTypes.independent_spanning_type(
            self.true_val.type, self.false_val.type)
        if self.type.is_reference:
            # a C++ reference cannot rebind between branches; fake it by value
            self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
        elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
            error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")

        if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
            self.true_val = self.true_val.coerce_to(self.type, env)
            self.false_val = self.false_val.coerce_to(self.type, env)

        if self.type.is_error:
            self.type_error()
        return self

    def coerce_to_integer(self, env):
        self.true_val = self.true_val.coerce_to_integer(env)
        self.false_val = self.false_val.coerce_to_integer(env)
        self.result_ctype = None
        return self.analyse_result_type(env)

    def coerce_to(self, dst_type, env):
        # Coercion is pushed down into both branches, then the result type
        # is re-derived from the coerced branches.
        self.true_val = self.true_val.coerce_to(dst_type, env)
        self.false_val = self.false_val.coerce_to(dst_type, env)
        self.result_ctype = None
        return self.analyse_result_type(env)

    def type_error(self):
        if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
                  (self.true_val.type, self.false_val.type))
        self.type = PyrexTypes.error_type

    def check_const(self):
        return (self.test.check_const()
                and self.true_val.check_const()
                and self.false_val.check_const())

    def generate_evaluation_code(self, code):
        # Because subexprs may not be evaluated we can use a more optimal
        # subexpr allocation strategy than the default, so override evaluation_code.

        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.test.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.test.result())
        self.eval_and_get(code, self.true_val)
        code.putln("} else {")
        self.eval_and_get(code, self.false_val)
        code.putln("}")
        self.test.generate_disposal_code(code)
        self.test.free_temps(code)

    def eval_and_get(self, code, expr):
        # Evaluate one branch and assign its owned result into our temp.
        expr.generate_evaluation_code(code)
        if self.type.is_memoryviewslice:
            expr.make_owned_memoryviewslice(code)
        else:
            expr.make_owned_reference(code)
        code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
        expr.generate_post_assignment_code(code)
        expr.free_temps(code)

    def generate_subexpr_disposal_code(self, code):
        pass  # done explicitly above (cleanup must separately happen within the if/else blocks)

    def free_subexpr_temps(self, code):
        pass  # done explicitly above (cleanup must separately happen within the if/else blocks)
# Maps each Cython comparison operator to the CPython rich-comparison
# constant name passed to PyObject_RichCompare() in generated code.
richcmp_constants = dict([
    ("<", "Py_LT"),
    ("<=", "Py_LE"),
    ("==", "Py_EQ"),
    ("!=", "Py_NE"),
    ("<>", "Py_NE"),
    (">", "Py_GT"),
    (">=", "Py_GE"),
    # the following are faked by special compare functions
    ("in", "Py_EQ"),
    ("not_in", "Py_NE"),
])
class CmpNode(object):
    #  Mixin class containing code common to PrimaryCmpNodes
    #  and CascadedCmpNodes.

    # Optional C helper used instead of PyObject_RichCompare() for
    # known builtin-type comparisons (set by
    # find_special_bool_compare_function()).
    special_bool_cmp_function = None
    special_bool_cmp_utility_code = None

    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type

    def calculate_cascaded_constant_result(self, operand1_result):
        # Fold a constant comparison at compile time, given the already
        # folded left operand value.
        func = compile_time_binary_operators[self.operator]
        operand2_result = self.operand2.constant_result
        if (isinstance(operand1_result, any_string_type) and
                isinstance(operand2_result, any_string_type) and
                type(operand1_result) != type(operand2_result)):
            # string comparison of different types isn't portable
            return

        if self.operator in ('in', 'not_in'):
            if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
                if not self.operand2.args:
                    # membership in an empty container is statically known
                    self.constant_result = self.operator == 'not_in'
                    return
                elif isinstance(self.operand2, ListNode) and not self.cascade:
                    # tuples are more efficient to store than lists
                    self.operand2 = self.operand2.as_tuple()
            elif isinstance(self.operand2, DictNode):
                if not self.operand2.key_value_pairs:
                    self.constant_result = self.operator == 'not_in'
                    return

        self.constant_result = func(operand1_result, operand2_result)

    def cascaded_compile_time_value(self, operand1, denv):
        # Evaluate this comparison (and any cascade) at compile time,
        # short-circuiting through the cascade like Python does.
        func = get_compile_time_binop(self)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            result = func(operand1, operand2)
        except Exception as e:
            self.compile_time_value_error(e)
            result = None
        if result:
            cascade = self.cascade
            if cascade:
                # Note: as of Python 2.6, the middle value is re-evaluated
                result = result and cascade.cascaded_compile_time_value(operand2, denv)
        return result

    def is_cpp_comparison(self):
        return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class

    def is_cpp_py_comparison(self):
        # exactly one side is a C++ class, the other a Python object
        type1 = self.operand1.type
        type2 = self.operand2.type
        return type1.is_pyobject and type2.is_cpp_class or type1.is_cpp_class and type2.is_pyobject

    def find_common_int_type(self, env, op, operand1, operand2):
        # type1 != type2 and at least one of the types is not a C int
        # Returns a common C integer type when one side is (or can coerce
        # to) a character literal, else None.
        type1 = operand1.type
        type2 = operand2.type
        type1_can_be_int = False
        type2_can_be_int = False

        if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
            type1_can_be_int = True
        if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
            type2_can_be_int = True

        if type1.is_int:
            if type2_can_be_int:
                return type1
        elif type2.is_int:
            if type1_can_be_int:
                return type2
        elif type1_can_be_int:
            if type2_can_be_int:
                if Builtin.unicode_type in (type1, type2):
                    # unicode char literals compare as Py_UCS4 code points
                    return PyrexTypes.c_py_ucs4_type
                else:
                    return PyrexTypes.c_uchar_type

        return None

    def find_common_type(self, env, op, operand1, common_type=None):
        # Determine the type both operands get coerced to for the
        # comparison, recursing through any cascade.
        operand2 = self.operand2
        type1 = operand1.type
        type2 = operand2.type

        new_common_type = None

        # catch general errors
        if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or
                type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))):
            error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
            new_common_type = error_type

        # try to use numeric comparisons where possible
        elif type1.is_complex or type2.is_complex:
            if (op not in ('==', '!=')
                    and (type1.is_complex or type1.is_numeric)
                    and (type2.is_complex or type2.is_numeric)):
                error(self.pos, "complex types are unordered")
                new_common_type = error_type
            elif type1.is_pyobject:
                new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type
            elif type2.is_pyobject:
                new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type
            else:
                new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif type1.is_numeric and type2.is_numeric:
            new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif common_type is None or not common_type.is_pyobject:
            new_common_type = self.find_common_int_type(env, op, operand1, operand2)

        if new_common_type is None:
            # fall back to generic type compatibility tests
            if type1.is_ctuple or type2.is_ctuple:
                new_common_type = py_object_type
            elif type1 == type2:
                new_common_type = type1
            elif type1.is_pyobject or type2.is_pyobject:
                if type2.is_numeric or type2.is_string:
                    if operand2.check_for_coercion_error(type1, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif type1.is_numeric or type1.is_string:
                    if operand1.check_for_coercion_error(type2, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
                    new_common_type = py_object_type
                else:
                    # one Python type and one non-Python type, not assignable
                    self.invalid_types_error(operand1, op, operand2)
                    new_common_type = error_type
            elif type1.assignable_from(type2):
                new_common_type = type1
            elif type2.assignable_from(type1):
                new_common_type = type2
            else:
                # C types that we couldn't handle up to here are an error
                self.invalid_types_error(operand1, op, operand2)
                new_common_type = error_type

        if new_common_type.is_string and (isinstance(operand1, BytesNode) or
                                          isinstance(operand2, BytesNode)):
            # special case when comparing char* to bytes literal: must
            # compare string values!
            new_common_type = bytes_type

        # recursively merge types
        if common_type is None or new_common_type.is_error:
            common_type = new_common_type
        else:
            # we could do a lot better by splitting the comparison
            # into a non-Python part and a Python part, but this is
            # safer for now
            common_type = PyrexTypes.spanning_type(common_type, new_common_type)

        if self.cascade:
            common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)

        return common_type

    def invalid_types_error(self, operand1, op, operand2):
        error(self.pos, "Invalid types for '%s' (%s, %s)" %
              (op, operand1.type, operand2.type))

    def is_python_comparison(self):
        # True when the comparison must be done at the Python level.
        return (not self.is_ptr_contains()
            and not self.is_c_string_contains()
            and (self.has_python_operands()
                 or (self.cascade and self.cascade.is_python_comparison())
                 or self.operator in ('in', 'not_in')))

    def coerce_operands_to(self, dst_type, env):
        operand2 = self.operand2
        if operand2.type != dst_type:
            self.operand2 = operand2.coerce_to(dst_type, env)
        if self.cascade:
            self.cascade.coerce_operands_to(dst_type, env)

    def is_python_result(self):
        # True when the result of the comparison is a Python object
        # (rather than a C bint).
        return ((self.has_python_operands() and
                 self.special_bool_cmp_function is None and
                 self.operator not in ('is', 'is_not', 'in', 'not_in') and
                 not self.is_c_string_contains() and
                 not self.is_ptr_contains())
            or (self.cascade and self.cascade.is_python_result()))

    def is_c_string_contains(self):
        # "c_char in bytes/char*" or "Py_UCS4 in unicode" can be done in C
        return self.operator in ('in', 'not_in') and \
               ((self.operand1.type.is_int
                 and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
                (self.operand1.type.is_unicode_char
                 and self.operand2.type is unicode_type))

    def is_ptr_contains(self):
        # NOTE: returns None (falsy) rather than False for non-'in' operators
        if self.operator in ('in', 'not_in'):
            container_type = self.operand2.type
            return (container_type.is_ptr or container_type.is_array) \
                and not container_type.is_string

    def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
        # Try to select a specialized C helper for this comparison; returns
        # True (and sets special_bool_cmp_function/..._utility_code) on success.
        # note: currently operand1 must get coerced to a Python object if we succeed here!
        if self.operator in ('==', '!='):
            type1, type2 = operand1.type, self.operand2.type
            if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
                if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
                    return True
                elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.str_type or type2 is Builtin.str_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyString_Equals"
                    return True
        elif self.operator in ('in', 'not_in'):
            if self.operand2.type is Builtin.dict_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
                return True
            elif self.operand2.type is Builtin.set_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
                return True
            elif self.operand2.type is Builtin.unicode_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
                self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF"
                return True
            else:
                if not self.operand2.type.is_pyobject:
                    self.operand2 = self.operand2.coerce_to_pyobject(env)
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF"
                return True
        return False

    def generate_operation_code(self, code, result_code,
            operand1, op, operand2):
        # Emit the C code for one comparison step, writing into result_code.
        if self.type.is_pyobject:
            error_clause = code.error_goto_if_null
            got_ref = "__Pyx_XGOTREF(%s); " % result_code
            if self.special_bool_cmp_function:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
                coerce_result = "__Pyx_PyBoolOrNull_FromLong"
            else:
                coerce_result = "__Pyx_PyBool_FromLong"
        else:
            error_clause = code.error_goto_if_neg
            got_ref = ""
            coerce_result = ""

        if self.special_bool_cmp_function:
            if operand1.type.is_pyobject:
                result1 = operand1.py_result()
            else:
                result1 = operand1.result()
            if operand2.type.is_pyobject:
                result2 = operand2.py_result()
            else:
                result2 = operand2.result()
            if self.special_bool_cmp_utility_code:
                code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
            code.putln(
                "%s = %s(%s(%s, %s, %s)); %s%s" % (
                    result_code,
                    coerce_result,
                    self.special_bool_cmp_function,
                    result1, result2, richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))

        elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
            assert op not in ('in', 'not_in'), op
            code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
                    result_code,
                    operand1.py_result(),
                    operand2.py_result(),
                    richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))

        elif operand1.type.is_complex:
            # only equality is defined; '!=' is the negated 'eq' helper
            code.putln("%s = %s(%s%s(%s, %s));" % (
                result_code,
                coerce_result,
                op == "!=" and "!" or "",
                operand1.type.unary_op('eq'),
                operand1.result(),
                operand2.result()))

        else:
            # plain C comparison, possibly after coercing to a common type
            type1 = operand1.type
            type2 = operand2.type
            if (type1.is_extension_type or type2.is_extension_type) \
                    and not type1.same_as(type2):
                common_type = py_object_type
            elif type1.is_numeric:
                common_type = PyrexTypes.widest_numeric_type(type1, type2)
            else:
                common_type = type1
            code1 = operand1.result_as(common_type)
            code2 = operand2.result_as(common_type)
            statement = "%s = %s(%s %s %s);" % (
                result_code,
                coerce_result,
                code1,
                self.c_operator(op),
                code2)
            if self.is_cpp_comparison() and self.exception_check == '+':
                translate_cpp_exception(
                    code,
                    self.pos,
                    statement,
                    result_code if self.type.is_pyobject else None,
                    self.exception_value,
                    self.in_nogil_context)
            else:
                code.putln(statement)

    def c_operator(self, op):
        # map Python identity operators onto C pointer comparison
        if op == 'is':
            return "=="
        elif op == 'is_not':
            return "!="
        else:
            return op
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
# a cascaded sequence.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
# cmp_func_type CFuncType or None
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
is_memslice_nonecheck = False
cmp_func_type = None
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
operator = self.operator
if is_pythran_expr(type1) or is_pythran_expr(type2):
if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
return PythranExpr(pythran_binop_type(self.operator, type1, type2))
cpp_type = None
if type1.is_cpp_class or type1.is_ptr:
cpp_type = type1.find_cpp_operation_type(operator, type2)
if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
cpp_type = type2.find_cpp_operation_type(operator, type1)
if cpp_type is not None:
return cpp_type
# TODO: implement this for other types.
return py_object_type
def type_dependencies(self, env):
return ()
def calculate_constant_result(self):
assert not self.cascade
self.calculate_cascaded_constant_result(self.operand1.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
if self.is_cpp_py_comparison():
return self.analyse_cpp_py_comparison(env)
elif self.is_cpp_comparison():
self.analyse_cpp_comparison(env)
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self
else:
return self.analyse_non_cpp_comparison(env)
    def analyse_non_cpp_comparison(self, env):
        """Type analysis for comparisons that do not go through a C++
        operator overload: Pythran, memoryview-vs-None, 'in'/'not in'
        containment, optimised bool comparisons, and plain comparisons."""
        type1 = self.operand1.type
        type2 = self.operand2.type
        # Pythran expressions compare element-wise and produce a Pythran type.
        if is_pythran_expr(type1) or is_pythran_expr(type2):
            if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
                self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
                self.is_pycmp = False
                return self
        # 'memview ==/!=/is/is_not None' reduces to a C pointer check.
        if self.analyse_memoryviewslice_comparison(env):
            return self
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        if self.operator in ('in', 'not_in'):
            if self.is_c_string_contains():
                # 'c_char in bytes' / 'Py_UCS4 in unicode' use C helper functions.
                self.is_pycmp = False
                common_type = None
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
                    return self
                if self.operand2.type is unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
                else:
                    if self.operand1.type is PyrexTypes.c_uchar_type:
                        self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
                    if self.operand2.type is not bytes_type:
                        self.operand2 = self.operand2.coerce_to(bytes_type, env)
                    env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
                # The container must not be None at runtime.
                self.operand2 = self.operand2.as_none_safe_node(
                    "argument of type 'NoneType' is not iterable")
            elif self.is_ptr_contains():
                if self.cascade:
                    error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
                self.type = PyrexTypes.c_bint_type
                # Will be transformed by IterationTransform
                return self
            elif self.find_special_bool_compare_function(env, self.operand1):
                if not self.operand1.type.is_pyobject:
                    self.operand1 = self.operand1.coerce_to_pyobject(env)
                common_type = None # if coercion needed, the method call above has already done it
                self.is_pycmp = False # result is bint
            else:
                # Generic Python containment test.
                common_type = py_object_type
                self.is_pycmp = True
        elif self.find_special_bool_compare_function(env, self.operand1):
            if not self.operand1.type.is_pyobject:
                self.operand1 = self.operand1.coerce_to_pyobject(env)
            common_type = None # if coercion needed, the method call above has already done it
            self.is_pycmp = False # result is bint
        else:
            common_type = self.find_common_type(env, self.operator, self.operand1)
            self.is_pycmp = common_type.is_pyobject
        if common_type is not None and not common_type.is_error:
            if self.operand1.type != common_type:
                self.operand1 = self.operand1.coerce_to(common_type, env)
            self.coerce_operands_to(common_type, env)
        if self.cascade:
            # The shared middle operand is evaluated once and reused.
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
            operand2 = self.cascade.optimise_comparison(self.operand2, env)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        if self.is_python_result():
            self.type = PyrexTypes.py_object_type
        else:
            self.type = PyrexTypes.c_bint_type
        # All links of the cascade share this node's result type.
        cdr = self.cascade
        while cdr:
            cdr.type = self.type
            cdr = cdr.cascade
        if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
            # 1) owned reference, 2) reused value, 3) potential function error return value
            self.is_temp = 1
        return self
    def analyse_cpp_py_comparison(self, env):
        """Analysis for a comparison mixing a C++ (or cypclass) operand with a
        Python object operand: prefer a C++ operator overload when one
        unambiguously matches, otherwise fall back to Python semantics."""
        operator = self.operator
        if self.operator in ('is', 'is_not'):
            # non-overridable comparisons, let cpp analysis decide whether this is an error
            self.analyse_cpp_comparison(env)
            return self
        if self.operand2.type.is_cyp_class and operator in ("in", "not_in"):
            # swap operands: containment is looked up as __contains__ on the container
            self.operand1, self.operand2 = self.operand2, self.operand1
            operator = "__contains__"
        entry = None
        try:
            entry = env.lookup_operator(operator, [self.operand1, self.operand2], throw=True)
        except (PyrexTypes.AmbiguousCallException, PyrexTypes.NoTypeMatchCallException):
            error(self.pos, ("Ambiguous operation with PyObject operand\n"
                "To select one of the alternatives, explicitly cast the PyObject operand\n"
                "To let Python handle the operation instead, cast the other operand to 'object'"
                "\n"))
            self.type = PyrexTypes.error_type
            return self
        except PyrexTypes.CallException:
            # No overload applies: fall through to the Python path below.
            pass
        if entry:
            self.analyse_cpp_comparison(env)
            return self
        else:
            # NOTE(review): if the operands were swapped above, they stay
            # swapped on this fallback path — confirm this is intended.
            return self.analyse_non_cpp_comparison(env)
    def analyse_cpp_comparison(self, env):
        """Resolve a comparison between C++/cypclass operands via operator
        overload lookup; 'is'/'is_not' on cypclass or pointer operands are
        handled as raw pointer comparisons."""
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.is_pycmp = False
        operator = self.operator
        if type2.is_cyp_class and self.operator in ("in", "not_in"):
            # swap operands: lookup __contains__ on the container type
            self.operand1, self.operand2 = self.operand2, self.operand1
            operator = "__contains__"
        entry = env.lookup_operator(operator, [self.operand1, self.operand2])
        if entry is None:
            if self.operator in ("is", "is_not"):
                if type1.is_cyp_class and type2.is_cyp_class:
                    # Identity test: cast both sides to a common cypclass type
                    # so the generated C++ compares the same pointer type.
                    common_type = PyrexTypes.independent_spanning_type(type1, type2)
                    if common_type.is_error:
                        common_type = PyrexTypes.cy_object_type
                    if type1.is_const_cyp_class or type2.is_const_cyp_class:
                        if not common_type.is_const_cyp_class:
                            common_type = PyrexTypes.cyp_class_const_type(common_type)
                    # do not analyse the TypecastNode
                    # to ensure a simple pointer cast is used and not a user-defined conversion method
                    self.operand1 = TypecastNode(self.operand1.pos, operand=self.operand1, type=common_type)
                    self.operand2 = TypecastNode(self.operand2.pos, operand=self.operand2, type=common_type)
                    self.type = PyrexTypes.c_bint_type
                    return
                elif (type1.is_ptr or type1.is_cyp_class) and (type2.is_ptr or type2.is_cyp_class):
                    self.type = PyrexTypes.c_bint_type
                    return
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, type1, type2))
            self.type = PyrexTypes.error_type
            self.result_code = ""
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        self.cmp_func_type = func_type
        self.exception_check = func_type.exception_check
        self.exception_value = func_type.exception_value
        if self.exception_check == '+':
            # C++ exceptions require a temp result so error handling can run.
            self.is_temp = True
            if self.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
        # A one-argument overload is a member operator (implicit 'this');
        # a two-argument overload is a free function.
        if len(func_type.args) == 1:
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type.as_returned_type()
def analyse_memoryviewslice_comparison(self, env):
have_none = self.operand1.is_none or self.operand2.is_none
have_slice = (self.operand1.type.is_memoryviewslice or
self.operand2.type.is_memoryviewslice)
ops = ('==', '!=', 'is', 'is_not')
if have_slice and have_none and self.operator in ops:
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_memslice_nonecheck = True
return True
return False
    def coerce_to_boolean(self, env):
        """When the comparison result is only used as a truth value, try to
        replace the generic Python comparison with a specialised C-bool one."""
        if self.is_pycmp:
            # coercing to bool => may allow for more efficient comparison code
            if self.find_special_bool_compare_function(
                    env, self.operand1, result_is_bool=True):
                self.is_pycmp = False
                self.type = PyrexTypes.c_bint_type
                self.is_temp = 1
                if self.cascade:
                    operand2 = self.cascade.optimise_comparison(
                        self.operand2, env, result_is_bool=True)
                    if operand2 is not self.operand2:
                        self.coerced_operand2 = operand2
                return self
        # TODO: check if we can optimise parts of the cascade here
        return ExprNode.coerce_to_boolean(self, env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
def check_const(self):
if self.cascade:
self.not_const()
return False
else:
return self.operand1.check_const() and self.operand2.check_const()
def calculate_result_code(self):
operand1, operand2 = self.operand1, self.operand2
if operand1.type.is_complex:
if self.operator == "!=":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
operand1.type.binary_op('=='),
operand1.result(),
operand2.result())
elif self.is_c_string_contains():
if operand2.type is unicode_type:
method = "__Pyx_UnicodeContainsUCS4"
else:
method = "__Pyx_BytesContains"
if self.operator == "not_in":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
method,
operand2.result(),
operand1.result())
else:
if is_pythran_expr(self.type):
result1, result2 = operand1.pythran_result(), operand2.pythran_result()
else:
result1, result2 = operand1.result(), operand2.result()
if operand1.type.is_cyp_class and self.operator in ("in", "not_in"):
return "(%s%s->operator__contains__(%s))" % (
"!" if self.operator is "not_in" else "",
result1,
result2)
elif operand1.type.is_cyp_class and self.operator not in ("is", "is_not"):
result1 = "(*%s)" % result1
if self.is_memslice_nonecheck:
if operand1.type.is_memoryviewslice:
result1 = "((PyObject *) %s.memview)" % result1
else:
result2 = "((PyObject *) %s.memview)" % result2
return "(%s %s %s)" % (
result1,
self.c_operator(self.operator),
result2)
    def generate_evaluation_code(self, code):
        """Evaluate both operands, emit the comparison, chain into the
        cascade (which short-circuits on the current result), then dispose
        of the operands."""
        self.operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
        self.generate_operation_code(code, self.result(),
            self.operand1, self.operator, self.operand2)
        if self.cascade:
            # The cascade reuses operand2 (possibly a coerced copy) as its
            # left operand and writes into this node's result.
            self.cascade.generate_evaluation_code(
                code, self.result(), self.coerced_operand2 or self.operand2,
                needs_evaluation=self.coerced_operand2 is not None)
        self.operand1.generate_disposal_code(code)
        self.operand1.free_temps(code)
        self.operand2.generate_disposal_code(code)
        self.operand2.free_temps(code)
    def generate_subexpr_disposal_code(self, code):
        #  If this is called, it is a non-cascaded cmp,
        #  so only need to dispose of the two main operands.
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_disposal_code(code)

    def free_subexpr_temps(self, code):
        #  If this is called, it is a non-cascaded cmp,
        #  so only need to dispose of the two main operands.
        self.operand1.free_temps(code)
        self.operand2.free_temps(code)

    def annotate(self, code):
        # Annotate operands and the whole cascade for the HTML report.
        self.operand1.annotate(code)
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
    #  A CascadedCmpNode is not a complete expression node. It
    #  hangs off the side of another comparison node, shares
    #  its left operand with that node, and shares its result
    #  with the PrimaryCmpNode at the head of the chain.
    #
    #  operator      string
    #  operand2      ExprNode
    #  cascade       CascadedCmpNode

    child_attrs = ['operand2', 'coerced_operand2', 'cascade']

    cascade = None
    coerced_operand2 = None
    constant_result = constant_value_not_set # FIXME: where to calculate this?

    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type

    def type_dependencies(self, env):
        return ()

    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
            self.constant_result is not not_a_constant

    def analyse_types(self, env):
        # Only operand2 belongs to this link; operand1 is shared with the
        # preceding comparison and analysed there.
        self.operand2 = self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        return self

    def has_python_operands(self):
        return self.operand2.type.is_pyobject

    def is_cpp_comparison(self):
        # cascaded comparisons aren't currently implemented for c++ classes.
        return False

    def optimise_comparison(self, operand1, env, result_is_bool=False):
        """Try to specialise this link to a C-bool comparison; returns the
        (possibly coerced) left operand the caller should use."""
        if self.find_special_bool_compare_function(env, operand1, result_is_bool):
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            if not operand1.type.is_pyobject:
                operand1 = operand1.coerce_to_pyobject(env)
        if self.cascade:
            operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        return operand1

    def coerce_operands_to_pyobjects(self, env):
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
            self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)

    def coerce_cascaded_operands_to_temp(self, env):
        # Each shared middle operand must be evaluated only once.
        if self.cascade:
            #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)

    def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
        """Emit this link guarded by the truth of the previous result, so the
        chain short-circuits like Python's 'a < b < c'."""
        if self.type.is_pyobject:
            code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
            code.put_decref(result, self.type)
        else:
            code.putln("if (%s) {" % result)
        if needs_evaluation:
            # operand1 is a coerced copy that is only evaluated on this branch.
            operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        self.generate_operation_code(code, result,
            operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.generate_evaluation_code(
                code, result, self.coerced_operand2 or self.operand2,
                needs_evaluation=self.coerced_operand2 is not None)
        if needs_evaluation:
            operand1.generate_disposal_code(code)
            operand1.free_temps(code)
        # Cascaded cmp result is always temp
        self.operand2.generate_disposal_code(code)
        self.operand2.free_temps(code)
        code.putln("}")

    def annotate(self, code):
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
# Maps a binary operator string to the ExprNode subclass that implements it.
# Consumed by binop_node() to build the right node for a given operator.
binop_node_classes = {
    "or":       BoolBinopNode,
    "and":      BoolBinopNode,
    "|":        IntBinopNode,
    "^":        IntBinopNode,
    "&":        IntBinopNode,
    "<<":       IntBinopNode,
    ">>":       IntBinopNode,
    "+":        AddNode,
    "-":        SubNode,
    "*":        MulNode,
    "@":        MatMultNode,
    "/":        DivNode,
    "//":       DivNode,
    "%":        ModNode,
    "**":       PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
    """Construct the binary-operator node of the class registered for
    *operator* in binop_node_classes."""
    node_class = binop_node_classes[operator]
    return node_class(
        pos,
        operator=operator,
        operand1=operand1,
        operand2=operand2,
        inplace=inplace,
        **kwargs)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
    #  Abstract base class for coercion nodes.
    #
    #  arg       ExprNode       node being coerced

    subexprs = ['arg']
    constant_result = not_a_constant

    def __init__(self, arg):
        super(CoercionNode, self).__init__(arg.pos)
        self.arg = arg
        if debug_coercion:
            print("%s Coercing %s" % (self, self.arg))

    def calculate_constant_result(self):
        # constant folding can break type coercion, so this is disabled
        pass

    def annotate(self, code):
        # Annotate the argument and, when types differ, the coercion itself.
        self.arg.annotate(code)
        if self.arg.type != self.type:
            file, line, col = self.pos
            code.annotate((file, line, col-1), AnnotationItem(
                style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
    """
    Coerce an object to a memoryview slice. This holds a new reference in
    a managed temp.
    """

    def __init__(self, arg, dst_type, env):
        assert dst_type.is_memoryviewslice
        assert not arg.type.is_memoryviewslice
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.is_temp = 1
        # Environment is kept so the utility code can be requested at
        # code-generation time.
        self.env = env
        self.use_managed_ref = True
        self.arg = arg

    def generate_result_code(self, code):
        self.type.create_from_py_utility_code(self.env)
        code.putln(self.type.from_py_call_code(
            self.arg.py_result(),
            self.result(),
            self.pos,
            code
        ))
class CastNode(CoercionNode):
    """Wrap another node in a plain C type cast.

    Evaluation is fully delegated to the argument; only the rendered
    result expression carries the new type.
    """

    def __init__(self, arg, new_type):
        super(CastNode, self).__init__(arg)
        self.type = new_type

    def may_be_none(self):
        # A cast neither introduces nor removes None-ness.
        return self.arg.may_be_none()

    def calculate_result_code(self):
        # Render the argument's result as the target type.
        return self.arg.result_as(self.type)

    def generate_result_code(self, code):
        self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
    #  This node is used to check that a generic Python
    #  object is an instance of a particular extension type.
    #  This node borrows the result of its argument node.

    # When testing against a builtin type, require the exact type
    # rather than accepting subtypes.
    exact_builtin_type = True

    def __init__(self, arg, dst_type, env, notnone=False):
        #  The arg is known to be a Python object, and
        #  the dst_type is known to be an extension type.
        assert dst_type.is_extension_type or dst_type.is_builtin_type, \
            "PyTypeTest for %s against non extension type %s" % (arg.type, dst_type)
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.result_ctype = arg.ctype()
        self.notnone = notnone

    nogil_check = Node.gil_error
    gil_message = "Python type test"

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        if self.notnone:
            return False
        return self.arg.may_be_none()

    # The following queries all delegate to the borrowed argument.
    def is_simple(self):
        return self.arg.is_simple()

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def is_ephemeral(self):
        return self.arg.is_ephemeral()

    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()

    def reanalyse(self):
        # Drop the type test when it is known to be redundant, keeping at
        # most a None check.
        if self.type != self.arg.type or not self.arg.is_temp:
            return self
        if not self.type.typeobj_is_available():
            return self
        if self.arg.may_be_none() and self.notnone:
            return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
        return self.arg

    def calculate_constant_result(self):
        # FIXME
        pass

    def calculate_result_code(self):
        return self.arg.result()

    def generate_result_code(self, code):
        if self.type.typeobj_is_available():
            if self.type.is_builtin_type:
                type_test = self.type.type_test_code(
                    self.arg.py_result(),
                    self.notnone, exact=self.exact_builtin_type)
                code.globalstate.use_utility_code(UtilityCode.load_cached(
                    "RaiseUnexpectedTypeError", "ObjectHandling.c"))
            else:
                type_test = self.type.type_test_code(
                    self.arg.py_result(), self.notnone)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
            code.putln("if (!(%s)) %s" % (
                type_test, code.error_goto(self.pos)))
        else:
            error(self.pos, "Cannot test type of extern C class "
                "without type object name specification")

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)

    # The result is borrowed from the argument, so this node owns no temp.
    def allocate_temp_result(self, code):
        pass

    def release_temp_result(self, code):
        pass

    def free_temps(self, code):
        self.arg.free_temps(code)

    def free_subexpr_temps(self, code):
        self.arg.free_subexpr_temps(code)
class NoneCheckNode(CoercionNode):
    #  This node is used to check that a Python object is not None and
    #  raises an appropriate exception (as specified by the creating
    #  transform).

    is_nonecheck = True

    def __init__(self, arg, exception_type_cname, exception_message,
            exception_format_args=()):
        CoercionNode.__init__(self, arg)
        self.type = arg.type
        self.result_ctype = arg.ctype()
        self.exception_type_cname = exception_type_cname
        self.exception_message = exception_message
        self.exception_format_args = tuple(exception_format_args or ())

    nogil_check = None # this node only guards an operation that would fail already

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        # That is the very point of this node.
        return False

    def is_simple(self):
        return self.arg.is_simple()

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()

    def calculate_result_code(self):
        return self.arg.result()

    def condition(self):
        """C expression whose comparison against Py_None performs the check."""
        if self.type.is_pyobject:
            return self.arg.py_result()
        elif self.type.is_memoryviewslice:
            return "((PyObject *) %s.memview)" % self.arg.result()
        else:
            raise Exception("unsupported type")

    @classmethod
    def generate(cls, arg, code, exception_message,
            exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
        # Emit a standalone None check without inserting a node in the tree.
        node = cls(arg, exception_type_cname, exception_message, exception_format_args)
        node.in_nogil_context = in_nogil_context
        node.put_nonecheck(code)

    @classmethod
    def generate_if_needed(cls, arg, code, exception_message,
            exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
        # Only emit the check when the argument can actually be None.
        if arg.may_be_none():
            cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context)

    def put_nonecheck(self, code):
        """Emit: if (arg == Py_None) raise the configured exception."""
        code.putln(
            "if (unlikely(%s == Py_None)) {" % self.condition())
        if self.in_nogil_context:
            # Raising requires the GIL.
            code.put_ensure_gil()
        escape = StringEncoding.escape_byte_string
        if self.exception_format_args:
            code.putln('PyErr_Format(%s, "%s", %s);' % (
                self.exception_type_cname,
                StringEncoding.escape_byte_string(
                    self.exception_message.encode('UTF-8')),
                ', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
                    for arg in self.exception_format_args ])))
        else:
            code.putln('PyErr_SetString(%s, "%s");' % (
                self.exception_type_cname,
                escape(self.exception_message.encode('UTF-8'))))
        if self.in_nogil_context:
            code.put_release_ensured_gil()
        code.putln(code.error_goto(self.pos))
        code.putln("}")

    def generate_result_code(self, code):
        self.put_nonecheck(code)

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
    #  This node is used to convert a C data type
    #  to a Python object.

    type = py_object_type
    target_type = py_object_type
    is_temp = 1

    def __init__(self, arg, env, type=py_object_type):
        if not arg.type.create_to_py_utility_code(env):
            error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
        elif arg.type.is_complex:
            # special case: complex coercion is so complex that it
            # uses a macro ("__pyx_PyComplex_FromComplex()"), for
            # which the argument must be simple
            arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        if type is py_object_type:
            # be specific about some known types
            if arg.type.is_string or arg.type.is_cpp_string:
                self.type = default_str_type(env)
            elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
                self.type = unicode_type
            elif arg.type.is_complex:
                self.type = Builtin.complex_type
            self.target_type = self.type
        elif arg.type.is_string or arg.type.is_cpp_string:
            if (type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(arg.pos,
                    "default encoding required for conversion from '%s' to '%s'" %
                    (arg.type, type))
            self.type = self.target_type = type
        else:
            # FIXME: check that the target type and the resulting type are compatible
            self.target_type = type

    gil_message = "Converting to Python object"

    def may_be_none(self):
        # FIXME: is this always safe?
        return False

    def coerce_to_boolean(self, env):
        # Avoid the round trip through Python when the C value already
        # carries the truth value.
        arg_type = self.arg.type
        if (arg_type == PyrexTypes.c_bint_type or
                (arg_type.is_pyobject and arg_type.name == 'bool')):
            return self.arg.coerce_to_temp(env)
        else:
            return CoerceToBooleanNode(self, env)

    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.arg.type.is_int:
            return self.arg
        else:
            return self.arg.coerce_to(PyrexTypes.c_long_type, env)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def generate_result_code(self, code):
        code.putln('%s; %s' % (
            self.arg.type.to_py_call_code(
                self.arg.result(),
                self.result(),
                self.target_type),
            code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class CoerceIntToBytesNode(CoerceToPyTypeNode):
    #  This node is used to convert a C int type to a Python bytes
    #  object.

    is_temp = 1

    def __init__(self, arg, env):
        # NOTE: deliberately bypasses CoerceToPyTypeNode.__init__ — no
        # generic to-py utility code is needed for a single byte.
        arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        self.type = Builtin.bytes_type

    def generate_result_code(self, code):
        arg = self.arg
        arg_result = arg.result()
        # Wider integer types need a runtime 0..255 range check first.
        if arg.type not in (PyrexTypes.c_char_type,
                PyrexTypes.c_uchar_type,
                PyrexTypes.c_schar_type):
            if arg.type.signed:
                code.putln("if ((%s < 0) || (%s > 255)) {" % (
                    arg_result, arg_result))
            else:
                code.putln("if (%s > 255) {" % arg_result)
            code.putln('PyErr_SetString(PyExc_OverflowError, '
                '"value too large to pack into a byte"); %s' % (
                code.error_goto(self.pos)))
            code.putln('}')
        temp = None
        if arg.type is not PyrexTypes.c_char_type:
            # PyBytes_FromStringAndSize needs a char*, so stage the value
            # in a char temp.
            temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
            code.putln("%s = (char)%s;" % (temp, arg_result))
            arg_result = temp
        code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
            self.result(),
            arg_result,
            code.error_goto_if_null(self.result(), self.pos)))
        if temp is not None:
            code.funcstate.release_temp(temp)
        self.generate_gotref(code)
class CoerceFromPyTypeNode(CoercionNode):
    #  This node is used to convert a Python object
    #  to a C data type.

    def __init__(self, result_type, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = result_type
        self.is_temp = 1
        if not result_type.create_from_py_utility_code(env):
            error(arg.pos,
                "Cannot convert Python object to '%s'" % result_type)
        if self.type.is_string or self.type.is_pyunicode_ptr:
            # The raw pointer would dangle if the global object is rebound.
            if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
                warning(arg.pos,
                    "Obtaining '%s' from externally modifiable global Python value" % result_type,
                    level=1)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def is_ephemeral(self):
        # A borrowed pointer into a temporary Python object dies with it.
        return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()

    def generate_result_code(self, code):
        from_py_function = None
        # for certain source types, we can do better than the generic coercion
        if self.type.is_string and self.arg.type is bytes_type:
            if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
                from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
                NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
        code.putln(self.type.from_py_call_code(
            self.arg.py_result(), self.result(), self.pos, code, from_py_function=from_py_function))
        if self.type.is_pyobject:
            self.generate_gotref(code)

    def nogil_check(self, env):
        error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
    #  This node is used when a result needs to be used
    #  in a boolean context.

    type = PyrexTypes.c_bint_type

    # Builtin types whose truth value can be read with a C size macro,
    # avoiding a call to __Pyx_PyObject_IsTrue().
    _special_builtins = {
        Builtin.list_type:       'PyList_GET_SIZE',
        Builtin.tuple_type:      'PyTuple_GET_SIZE',
        Builtin.set_type:        'PySet_GET_SIZE',
        Builtin.frozenset_type:  'PySet_GET_SIZE',
        Builtin.bytes_type:      'PyBytes_GET_SIZE',
        Builtin.bytearray_type:  'PyByteArray_GET_SIZE',
        Builtin.unicode_type:    '__Pyx_PyUnicode_IS_TRUE',
    }

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        if arg.type.is_pyobject:
            self.is_temp = 1

    def nogil_check(self, env):
        # Generic truth testing of Python objects needs the GIL; the
        # size-macro specialisations above do not.
        if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
            self.gil_error()

    gil_message = "Truth-testing Python object"

    def check_const(self):
        if self.is_temp:
            self.not_const()
            return False
        return self.arg.check_const()

    def calculate_result_code(self):
        return "(%s != 0)" % self.arg.result()

    def generate_result_code(self, code):
        if not self.is_temp:
            return
        test_func = self._special_builtins.get(self.arg.type)
        if test_func is not None:
            # Treat a possible None like an empty (false) container.
            checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
            checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
            code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
        else:
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    self.result(),
                    self.arg.py_result(),
                    code.error_goto_if_neg(self.result(), self.pos)))

    def analyse_types(self, env):
        return self
class CoerceFromCallable(CoercionNode):
    """Transparent wrapper for a callable cpp-typed node used in a SimpleCallNode.

    Expression analysis replaces the callable's cpp type with the type of
    its operator() method; this wrapper lets the underlying node keep its
    original type while everything else is delegated unchanged.
    """

    def __init__(self, arg):
        super(CoerceFromCallable, self).__init__(arg)
        self.type = arg.type

    # All behaviour is delegated to the wrapped node.
    def generate_result_code(self, code):
        self.arg.generate_result_code(code)

    def result(self):
        return self.arg.result()

    def is_simple(self):
        return self.arg.is_simple()

    def may_be_none(self):
        return self.arg.may_be_none()

    def generate_evaluation_code(self, code):
        self.arg.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.arg.generate_disposal_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CoerceToComplexNode(CoercionNode):
    # Convert a real (or differently-typed complex) value to a complex
    # C type; the conversion is a pure expression, no code is emitted.

    def __init__(self, arg, dst_type, env):
        if arg.type.is_complex:
            # The argument's result is referenced twice (real + imag parts).
            arg = arg.coerce_to_simple(env)
        self.type = dst_type
        CoercionNode.__init__(self, arg)
        dst_type.create_declaration_utility_code(env)

    def calculate_result_code(self):
        if self.arg.type.is_complex:
            real_part = "__Pyx_CREAL(%s)" % self.arg.result()
            imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
        else:
            real_part = self.arg.result()
            imag_part = "0"
        return "%s(%s, %s)" % (
            self.type.from_parts,
            real_part,
            imag_part)

    def generate_result_code(self, code):
        # Nothing to emit: the coercion happens in calculate_result_code().
        pass

    def analyse_types(self, env):
        return self
class CoerceToTempNode(CoercionNode):
    #  This node is used to force the result of another node
    #  to be stored in a temporary. It is only used if the
    #  argument node's result is not already in a temporary.

    def __init__(self, arg, env=None):
        CoercionNode.__init__(self, arg)
        self.type = self.arg.type.as_argument_type()
        self.constant_result = self.arg.constant_result
        self.is_temp = 1
        if self.type.is_pyobject:
            self.result_ctype = py_object_type

    gil_message = "Creating temporary Python reference"

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def may_be_none(self):
        return self.arg.may_be_none()

    def coerce_to_boolean(self, env):
        self.arg = self.arg.coerce_to_boolean(env)
        if self.arg.is_simple():
            # No temp needed for a simple boolean expression.
            return self.arg
        self.type = self.arg.type
        self.result_ctype = self.type
        return self

    def generate_result_code(self, code):
        #self.arg.generate_evaluation_code(code) # Already done
        # by generic generate_subexpr_evaluation_code!
        code.putln("%s = %s;" % (
            self.result(), self.arg.result_as(self.ctype())))
        if self.use_managed_ref:
            # The temp owns a reference of its own.
            if not self.type.is_memoryviewslice:
                code.put_incref(self.result(), self.ctype())
            else:
                code.put_incref_memoryviewslice(self.result(), self.type,
                    have_gil=not self.in_nogil_context)
class CoerceToLockedNode(CoercionNode):
    #  This node is used to lock a node of cypclass type around the evaluation of its subexpressions.
    #
    #  exclusive     boolean   take a write (exclusive) lock rather than a read lock
    #  needs_decref  boolean   used internally

    def __init__(self, arg, env=None, exclusive=True):
        self.exclusive = exclusive
        self.type = arg.type
        # The locked object must live in a temp; its subexpression disposal
        # is postponed until the lock scope is closed in generate_disposal_code().
        temp_arg = arg.coerce_to_temp(env)
        temp_arg.postpone_subexpr_disposal = True
        # Avoid incrementing the reference count when assigning to the temporary
        # but ensure it will be decremented if it was already incremented previously.
        self.needs_decref = not temp_arg.use_managed_ref
        temp_arg.use_managed_ref = False
        super(CoerceToLockedNode,self).__init__(temp_arg)

    def result(self):
        return self.arg.result()

    def is_simple(self):
        return False

    def may_be_none(self):
        return self.arg.may_be_none()

    def generate_result_code(self, code):
        #XXX Code duplicated from Nodes.LockCypclassNode.
        # Build a source-location string for the lock diagnostics.
        if self.arg.pos:
            source_descr, lineno, colno = self.arg.pos
            source_str = source_descr.get_description()
            source_lines = source_descr.get_lines()
            line_str = source_lines[lineno - 1]
            col_str = "%s%s" % (' ' * colno, '^')
            context = "%s:%d:%d\n%s%s" % (source_str, lineno, colno, line_str, col_str)
            context = code.get_string_const(StringEncoding.EncodedString(context))
        else:
            context = "NULL"
        # Create a scope to use scope bound resource management (RAII).
        code.putln("{")
        # Since each lock guard has its own scope,
        # a prefix is enough to prevent name collisions.
        guard_code = "%sguard" % Naming.cypclass_lock_guard_prefix
        if self.exclusive:
            code.putln("Cy_wlock_guard %s(%s, %s);" % (guard_code, self.result(), context))
        else:
            code.putln("Cy_rlock_guard %s(%s, %s);" % (guard_code, self.result(), context))

    def generate_disposal_code(self, code):
        # Close the scope to release the lock.
        code.putln("}")
        # Dispose of and free subexpressions.
        self.arg.generate_subexpr_disposal_code(code)
        self.arg.free_subexpr_temps(code)
        # Decref only if previously incref-ed.
        if self.needs_decref:
            code.put_xdecref_clear(self.result(), self.ctype(), have_gil=not self.in_nogil_context)
class ProxyNode(CoercionNode):
    """
    A node that should not be replaced by transforms or other means,
    and hence can be useful to wrap the argument to a clone node

    MyNode    -> ProxyNode -> ArgNode
    CloneNode -^
    """

    nogil_check = None

    def __init__(self, arg):
        super(ProxyNode, self).__init__(arg)
        self.constant_result = arg.constant_result
        self._proxy_type()

    def analyse_types(self, env):
        self.arg = self.arg.analyse_expressions(env)
        # Re-mirror the argument's (possibly updated) type information.
        self._proxy_type()
        return self

    def infer_type(self, env):
        return self.arg.infer_type(env)

    def _proxy_type(self):
        # Copy type/entry attributes from the argument when present.
        type = getattr(self.arg, 'type', None)
        if type:
            self.type = type
            self.result_ctype = self.arg.result_ctype
        arg_entry = getattr(self.arg, 'entry', None)
        if arg_entry:
            self.entry = arg_entry

    def generate_result_code(self, code):
        self.arg.generate_result_code(code)

    def result(self):
        return self.arg.result()

    def is_simple(self):
        return self.arg.is_simple()

    def may_be_none(self):
        return self.arg.may_be_none()

    def generate_evaluation_code(self, code):
        self.arg.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.arg.generate_disposal_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CloneNode(CoercionNode):
    """Borrows the result of another node that is used multiple times.

    The argument node's result must be in a temporary. This node does
    not generate any evaluation or disposal code for it; the original
    owner of the argument node remains responsible for those things.
    """

    subexprs = []  # Arg is not considered a subexpr
    nogil_check = None

    def __init__(self, arg):
        CoercionNode.__init__(self, arg)
        self.constant_result = arg.constant_result
        # Copy type/result ctype/entry from the argument where known.
        type = getattr(arg, 'type', None)
        if type:
            self.type = type
            self.result_ctype = arg.result_ctype
        arg_entry = getattr(arg, 'entry', None)
        if arg_entry:
            self.entry = arg_entry

    def result(self):
        # The borrowed result of the argument node.
        return self.arg.result()

    def may_be_none(self):
        return self.arg.may_be_none()

    def type_dependencies(self, env):
        return self.arg.type_dependencies(env)

    def infer_type(self, env):
        return self.arg.infer_type(env)

    def analyse_types(self, env):
        # The argument is assumed to be analysed already; just mirror it.
        self.type = self.arg.type
        self.result_ctype = self.arg.result_ctype
        self.is_temp = 1
        arg_entry = getattr(self.arg, 'entry', None)
        if arg_entry:
            self.entry = arg_entry
        return self

    def coerce_to(self, dest_type, env):
        # Literals can be coerced directly without cloning.
        if self.arg.is_literal:
            return self.arg.coerce_to(dest_type, env)
        return super(CloneNode, self).coerce_to(dest_type, env)

    def is_simple(self):
        return True  # result is always in a temp (or a name)

    # No evaluation/disposal: the owner of the argument node does that.
    def generate_evaluation_code(self, code):
        pass

    def generate_result_code(self, code):
        pass

    def generate_disposal_code(self, code):
        pass

    def free_temps(self, code):
        pass
class CMethodSelfCloneNode(CloneNode):
    """Special CloneNode for the self argument of builtin C methods
    that accepts subtypes of the builtin type. Safe only for 'final'
    subtypes, as non-final subtypes may override the C method.
    """

    def coerce_to(self, dst_type, env):
        if not (dst_type.is_builtin_type and self.type.subtype_of(dst_type)):
            return CloneNode.coerce_to(self, dst_type, env)
        return self
class ModuleRefNode(ExprNode):
    """Expression node that simply returns the module object."""

    type = py_object_type
    is_temp = False
    subexprs = []

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        # The module object is always set while its code is running.
        return False

    def calculate_result_code(self):
        return Naming.module_cname

    def generate_result_code(self, code):
        # Nothing to compute: the module object already exists.
        pass
class DocstringRefNode(ExprNode):
    # Extracts the docstring of the body element at runtime
    # via a '__doc__' attribute lookup.
    subexprs = ['body']
    type = py_object_type
    is_temp = True

    def __init__(self, pos, body):
        ExprNode.__init__(self, pos)
        assert body.type.is_pyobject
        self.body = body

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        # Generates: <result> = __Pyx_GetAttr(<body>, "__doc__")
        code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
            self.result(), self.body.result(),
            code.intern_identifier(StringEncoding.EncodedString("__doc__")),
            code.error_goto_if_null(self.result(), self.pos)))
        self.generate_gotref(code)
class AnnotationNode(ExprNode):
    """Deals with the two possible uses of an annotation:

    1. the post PEP-563 use, where an annotation is stored as a string;
    2. the Cython use, where the annotation can indicate an object type.

    Doesn't handle the pre PEP-563 version where the annotation is
    evaluated into a Python object.
    """

    subexprs = []

    # 'untyped' is set for fused specializations:
    # Once a fused function has been created we don't want
    # annotations to override an already set type.
    untyped = False

    def __init__(self, pos, expr, string=None):
        """string is expected to already be a StringNode or None"""
        ExprNode.__init__(self, pos)
        if string is None:
            # import doesn't work at top of file?
            from .AutoDocTransforms import AnnotationWriter
            string = StringEncoding.EncodedString(
                AnnotationWriter(description="annotation").write(expr))
            string = StringNode(pos, unicode_value=string, value=string.as_utf8_string())
        self.string = string
        self.expr = expr

    def analyse_types(self, env):
        return self  # nothing needs doing

    def analyse_as_type(self, env):
        # for compatibility when used as a return_type_node, have this interface too
        return self.analyse_type_annotation(env)[1]

    def analyse_type_annotation(self, env, assigned_value=None):
        # Returns (base_type_node, arg_type); (None, None) if the
        # annotation was already consumed as a fused type.
        if self.untyped:
            # Already applied as a fused type, not re-evaluating it here.
            return None, None
        annotation = self.expr
        base_type = None
        is_ambiguous = False
        explicit_pytype = explicit_ctype = False
        if annotation.is_dict_literal:
            # Legacy dict form: {'type': ..., 'ctype': ...}.
            warning(annotation.pos,
                    "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.", level=1)
            for name, value in annotation.key_value_pairs:
                if not name.is_string_literal:
                    continue
                if name.value in ('type', b'type'):
                    explicit_pytype = True
                    if not explicit_ctype:
                        annotation = value
                elif name.value in ('ctype', b'ctype'):
                    # 'ctype' takes precedence over 'type'.
                    explicit_ctype = True
                    annotation = value
            if explicit_pytype and explicit_ctype:
                warning(annotation.pos, "Duplicate type declarations found in signature annotation", level=1)
        arg_type = annotation.analyse_as_type(env)
        if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
            # Map builtin numeric Python types to C types in safe cases.
            if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
                assigned_type = assigned_value.infer_type(env)
                if assigned_type and assigned_type.is_pyobject:
                    # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
                    is_ambiguous = True
                    arg_type = None
            # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
            if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
                arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
        elif arg_type is not None and annotation.is_string_literal:
            warning(annotation.pos,
                    "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.",
                    level=1)
        if arg_type is not None:
            if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
                warning(annotation.pos,
                        "Python type declaration in signature annotation does not refer to a Python type")
            base_type = Nodes.CAnalysedBaseTypeNode(
                annotation.pos, type=arg_type, is_arg=True)
        elif is_ambiguous:
            warning(annotation.pos, "Ambiguous types in annotation, ignoring")
        else:
            warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
        return base_type, arg_type
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------

# Tests PyErr_Occurred() after making sure the GIL is held first
# (needed when checking for errors from code that runs without the GIL).
# Fix: PEP 8 spacing around '=' in the assignment.
pyerr_occurred_withgil_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------

# Raises UnboundLocalError for a read of an uninitialised local variable.
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")

# Raises NameError for a read of an unbound closure (free) variable.
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production.
# The nogil variant acquires the GIL before raising.
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])

#------------------------------------------------------------------------------------

# Shared helpers for tuple-unpacking errors, loaded from ObjectHandling.c.
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/FlowControl.pxd 0000664 0000000 0000000 00000005602 13766135517 0031261 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import
cimport cython
from .Visitor cimport CythonTransform, TreeVisitor
cdef class ControlBlock:
    # Basic block of the control-flow graph (see FlowControl.py).
    cdef public set children
    cdef public set parents
    cdef public set positions
    cdef public list stats
    cdef public dict gen
    cdef public set bounded

    # Big integer bitsets used by the reaching-definitions analysis.
    cdef public object i_input
    cdef public object i_output
    cdef public object i_gen
    cdef public object i_kill
    cdef public object i_state

    cpdef bint empty(self)
    cpdef detach(self)
    cpdef add_child(self, block)
cdef class ExitBlock(ControlBlock):
    # Exit point of the graph; reports itself as never empty.
    cpdef bint empty(self)
cdef class NameAssignment:
    # One assignment (or deletion / argument binding) to a tracked name.
    cdef public bint is_arg
    cdef public bint is_deletion
    cdef public object lhs
    cdef public object rhs
    cdef public object entry
    cdef public object pos
    cdef public set refs
    cdef public object bit          # bitset bit assigned by ControlFlow.initialize()
    cdef public object inferred_type
cdef class AssignmentList:
    # Per-entry bookkeeping: its bit, the mask over all of its
    # assignments' bits, and the list of those assignments.
    cdef public object bit
    cdef public object mask
    cdef public list stats
cdef class AssignmentCollector(TreeVisitor):
    # Collects (lhs, rhs) pairs from assignment statements in a subtree.
    cdef list assignments
@cython.final
cdef class ControlFlow:
    # Control-flow graph plus the reaching-definitions machinery.
    cdef public set blocks
    cdef public set entries
    cdef public list loops
    cdef public list exceptions

    cdef public ControlBlock entry_point
    cdef public ExitBlock exit_point
    cdef public ControlBlock block      # current block during construction

    cdef public dict assmts             # entry -> AssignmentList

    cpdef newblock(self, ControlBlock parent=*)
    cpdef nextblock(self, ControlBlock parent=*)
    cpdef bint is_tracked(self, entry)
    cpdef bint is_statically_assigned(self, entry)
    cpdef mark_position(self, node)
    cpdef mark_assignment(self, lhs, rhs, entry)
    cpdef mark_argument(self, lhs, rhs, entry)
    cpdef mark_deletion(self, node, entry)
    cpdef mark_reference(self, node, entry)

    @cython.locals(block=ControlBlock, parent=ControlBlock, unreachable=set)
    cpdef normalize(self)

    @cython.locals(bit=object, assmts=AssignmentList,
                   block=ControlBlock)
    cpdef initialize(self)

    @cython.locals(assmts=AssignmentList, assmt=NameAssignment)
    cpdef set map_one(self, istate, entry)

    @cython.locals(block=ControlBlock, parent=ControlBlock)
    cdef reaching_definitions(self)
cdef class Uninitialized:
    # Sentinel type: name is definitely not initialised yet.
    pass
cdef class Unknown:
    # Sentinel type: value from an outer closure, may or may not be set.
    pass
cdef class MessageCollection:
    # Set of (pos, is_error, message) tuples, reported sorted.
    cdef set messages
# Runs the reaching-definitions analysis on `flow` and reports
# uninitialised/unused-name errors and warnings.
@cython.locals(dirty=bint, block=ControlBlock, parent=ControlBlock,
               assmt=NameAssignment)
cdef check_definitions(ControlFlow flow, dict compiler_directives)
@cython.final
cdef class ControlFlowAnalysis(CythonTransform):
    # Tree transform that builds per-function CFGs and checks definitions.
    cdef object gv_ctx
    cdef object constant_folder
    cdef set reductions
    cdef list env_stack
    cdef list stack
    cdef object env
    cdef ControlFlow flow
    cdef object object_expr
    cdef bint in_inplace_assignment

    cpdef mark_assignment(self, lhs, rhs=*)
    cpdef mark_position(self, node)
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/FlowControl.py 0000664 0000000 0000000 00000133741 13766135517 0031124 0 ustar 00root root 0000000 0000000 # cython: language_level=3str
# cython: auto_pickle=True
from __future__ import absolute_import
import cython
cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object,
Builtin=object, InternalError=object, error=object, warning=object,
fake_rhs_expr=object, TypedExprNode=object)
from . import Builtin
from . import ExprNodes
from . import Nodes
from . import Options
from . import PyrexTypes
from .Visitor import TreeVisitor, CythonTransform
from .Errors import error, warning, InternalError
from .Optimize import ConstantFolding
class TypedExprNode(ExprNodes.ExprNode):
    """Stand-in expression of a known type, used for declaring
    assignments of a specified type without a known entry."""

    def __init__(self, type, may_be_none=None, pos=None):
        super(TypedExprNode, self).__init__(pos)
        self.type = type
        self._may_be_none = may_be_none

    def may_be_none(self):
        # Treat anything other than an explicit False as "maybe None".
        return self._may_be_none != False
# Fake rhs to silence "unused variable" warning:
# assignments whose rhs is this node are filtered out again by
# ControlFlowState and check_definitions below.
fake_rhs_expr = TypedExprNode(PyrexTypes.unspecified_type)
class ControlBlock(object):
    """Control flow graph node: a sequence of assignments and name references.

    children   set of successor nodes
    parents    set of predecessor nodes
    positions  set of position markers
    stats      list of block statements
    gen        dict of assignments generated by this block
    bounded    set of entries that are definitely bounded in this block

    Example:
        a = 1
        b = a + c  # 'c' is already bounded or exception here
        stats = [Assignment(a), NameReference(a), NameReference(c),
                 Assignment(b)]
        gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)}
        bounded = set([Entry(a), Entry(c)])
    """

    def __init__(self):
        self.children = set()
        self.parents = set()
        self.positions = set()
        self.stats = []
        self.gen = {}
        self.bounded = set()
        # Reaching-definitions bitsets (arbitrary-precision ints).
        self.i_input = 0
        self.i_output = 0
        self.i_gen = 0
        self.i_kill = 0
        self.i_state = 0

    def empty(self):
        return not (self.stats or self.positions)

    def detach(self):
        """Detach block from parents and children."""
        for succ in self.children:
            succ.parents.remove(self)
        for pred in self.parents:
            pred.children.remove(self)
        self.parents.clear()
        self.children.clear()

    def add_child(self, block):
        self.children.add(block)
        block.parents.add(self)
class ExitBlock(ControlBlock):
    """Exit point block: reports itself as non-empty so it is kept."""

    def empty(self):
        return False
class AssignmentList(object):
    """Holds one entry's assignments (bit/mask are attached externally)."""

    def __init__(self):
        self.stats = []
class ControlFlow(object):
    """Control-flow graph.

    entry_point ControlBlock entry point for this graph
    exit_point  ControlBlock normal exit point
    block       ControlBlock current block
    blocks      set children nodes
    entries     set tracked entries
    loops       list stack for loop descriptors
    exceptions  list stack for exception descriptors
    """

    def __init__(self):
        self.blocks = set()
        self.entries = set()
        self.loops = []
        self.exceptions = []

        self.entry_point = ControlBlock()
        self.exit_point = ExitBlock()
        self.blocks.add(self.exit_point)
        self.block = self.entry_point

    def newblock(self, parent=None):
        """Create a floating block linked to `parent` if given.

        NOTE: the block IS added to self.blocks; it just does not
        become the current block.
        """
        block = ControlBlock()
        self.blocks.add(block)
        if parent:
            parent.add_child(block)
        return block

    def nextblock(self, parent=None):
        """Create a child block of the current block (or `parent` if
        given) and make it the new current block.

        NOTE: Block is added to self.blocks
        """
        block = ControlBlock()
        self.blocks.add(block)
        if parent:
            parent.add_child(block)
        elif self.block:
            self.block.add_child(block)
        self.block = block
        return self.block

    def is_tracked(self, entry):
        # Track local names (incl. closure variables), but never
        # anonymous entries.
        if entry.is_anonymous:
            return False
        return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or
                entry.from_closure or entry.in_closure or
                entry.error_on_uninitialized)

    def is_statically_assigned(self, entry):
        if (entry.is_local and entry.is_variable and
                (entry.type.is_struct_or_union or
                 entry.type.is_complex or
                 entry.type.is_array or
                 entry.type.is_cpp_class)):
            # stack allocated structured variable => never uninitialised
            return True
        return False

    def mark_position(self, node):
        """Mark position, will be used to draw graph nodes."""
        if self.block:
            self.block.positions.add(node.pos[:2])

    def mark_assignment(self, lhs, rhs, entry):
        if self.block and self.is_tracked(entry):
            assignment = NameAssignment(lhs, rhs, entry)
            self.block.stats.append(assignment)
            # Last assignment per entry wins for the block's gen set.
            self.block.gen[entry] = assignment
            self.entries.add(entry)

    def mark_argument(self, lhs, rhs, entry):
        if self.block and self.is_tracked(entry):
            assignment = Argument(lhs, rhs, entry)
            self.block.stats.append(assignment)
            self.block.gen[entry] = assignment
            self.entries.add(entry)

    def mark_deletion(self, node, entry):
        if self.block and self.is_tracked(entry):
            assignment = NameDeletion(node, entry)
            self.block.stats.append(assignment)
            # After deletion the name is uninitialised again.
            self.block.gen[entry] = Uninitialized
            self.entries.add(entry)

    def mark_reference(self, node, entry):
        if self.block and self.is_tracked(entry):
            self.block.stats.append(NameReference(node, entry))
            ## XXX: We don't track expression evaluation order so we can't use
            ## XXX: successful reference as initialization sign.
            ## # Local variable is definitely bound after this reference
            ## if not node.allow_null:
            ##     self.block.bounded.add(entry)
            self.entries.add(entry)

    def normalize(self):
        """Delete unreachable and orphan blocks."""
        # Reachability sweep from the entry point.
        queue = set([self.entry_point])
        visited = set()
        while queue:
            root = queue.pop()
            visited.add(root)
            for child in root.children:
                if child not in visited:
                    queue.add(child)
        unreachable = self.blocks - visited
        for block in unreachable:
            block.detach()
        visited.remove(self.entry_point)
        for block in visited:
            if block.empty():
                # Splice out empty blocks: connect their parents
                # directly to their children, then drop the block.
                for parent in block.parents:  # Re-parent
                    for child in block.children:
                        parent.add_child(child)
                block.detach()
                unreachable.add(block)
        self.blocks -= unreachable

    def initialize(self):
        """Set initial state, map assignments to bits."""
        self.assmts = {}

        # One bit per entry (its "uninitialised" bit) ...
        bit = 1
        for entry in self.entries:
            assmts = AssignmentList()
            assmts.mask = assmts.bit = bit
            self.assmts[entry] = assmts
            bit <<= 1

        # ... plus one bit per individual assignment.  An entry's mask
        # covers its own bit and all of its assignments' bits.
        for block in self.blocks:
            for stat in block.stats:
                if isinstance(stat, NameAssignment):
                    stat.bit = bit
                    assmts = self.assmts[stat.entry]
                    assmts.stats.append(stat)
                    assmts.mask |= bit
                    bit <<= 1

        # Derive each block's gen/kill sets from the last assignment it
        # makes per entry (Uninitialized for deletions).
        for block in self.blocks:
            for entry, stat in block.gen.items():
                assmts = self.assmts[entry]
                if stat is Uninitialized:
                    block.i_gen |= assmts.bit
                else:
                    block.i_gen |= stat.bit
                block.i_kill |= assmts.mask
            block.i_output = block.i_gen
            for entry in block.bounded:
                block.i_kill |= self.assmts[entry].bit

        # At the entry point every entry starts as "uninitialised".
        for assmts in self.assmts.values():
            self.entry_point.i_gen |= assmts.bit
        self.entry_point.i_output = self.entry_point.i_gen

    def map_one(self, istate, entry):
        # Decode a bitset state into the set of assignments (plus
        # Uninitialized/Unknown sentinels) that may reach this point.
        ret = set()
        assmts = self.assmts[entry]
        if istate & assmts.bit:
            if self.is_statically_assigned(entry):
                ret.add(StaticAssignment(entry))
            elif entry.from_closure:
                ret.add(Unknown)
            else:
                ret.add(Uninitialized)
        for assmt in assmts.stats:
            if istate & assmt.bit:
                ret.add(assmt)
        return ret

    def reaching_definitions(self):
        """Per-block reaching definitions analysis."""
        # Standard iterative dataflow: repeat until a fixed point.
        dirty = True
        while dirty:
            dirty = False
            for block in self.blocks:
                i_input = 0
                for parent in block.parents:
                    i_input |= parent.i_output
                i_output = (i_input & ~block.i_kill) | block.i_gen
                if i_output != block.i_output:
                    dirty = True
                block.i_input = i_input
                block.i_output = i_output
class LoopDescr(object):
    """Descriptor of an enclosing loop during CFG construction.

    NOTE(review): names suggest `next_block` is the block following the
    loop and `loop_block` the loop head — confirm at call sites.
    """

    def __init__(self, next_block, loop_block):
        self.next_block = next_block
        self.loop_block = loop_block
        self.exceptions = []
class ExceptionDescr(object):
    """Exception handling helper.

    entry_point   ControlBlock  exception handling entry point
    finally_enter ControlBlock  normal finally clause entry point
    finally_exit  ControlBlock  normal finally clause exit point
    """

    def __init__(self, entry_point, finally_enter=None, finally_exit=None):
        self.entry_point = entry_point
        self.finally_enter = finally_enter
        self.finally_exit = finally_exit
class NameAssignment(object):
    """A single assignment of `rhs` to the tracked name `lhs` / `entry`."""

    def __init__(self, lhs, rhs, entry):
        # Make sure the lhs node carries a cf_state set for the analysis.
        if lhs.cf_state is None:
            lhs.cf_state = set()
        self.lhs = lhs
        self.rhs = rhs
        self.entry = entry
        self.pos = lhs.pos
        self.refs = set()
        self.is_arg = False
        self.is_deletion = False
        self.inferred_type = None

    def __repr__(self):
        return '%s(entry=%r)' % (self.__class__.__name__, self.entry)

    def infer_type(self):
        self.inferred_type = self.rhs.infer_type(self.entry.scope)
        return self.inferred_type

    def type_dependencies(self):
        return self.rhs.type_dependencies(self.entry.scope)

    @property
    def type(self):
        # Prefer the declared entry type; fall back to the inferred one.
        entry_type = self.entry.type
        if entry_type.is_unspecified:
            return self.inferred_type
        return entry_type
class StaticAssignment(NameAssignment):
    """Initialised at declaration time, e.g. stack allocation."""

    def __init__(self, entry):
        # Python objects might still be None; C values definitely are not.
        may_be_none = None if entry.type.is_pyobject else False
        lhs = TypedExprNode(
            entry.type, may_be_none=may_be_none, pos=entry.pos)
        super(StaticAssignment, self).__init__(lhs, lhs, entry)

    def infer_type(self):
        return self.entry.type

    def type_dependencies(self):
        return ()
class Argument(NameAssignment):
    """Function-argument binding: a NameAssignment with is_arg set."""

    def __init__(self, lhs, rhs, entry):
        super(Argument, self).__init__(lhs, rhs, entry)
        self.is_arg = True
class NameDeletion(NameAssignment):
    """'del name': modelled as a self-assignment marked is_deletion."""

    def __init__(self, lhs, entry):
        NameAssignment.__init__(self, lhs, lhs, entry)
        self.is_deletion = True

    def infer_type(self):
        inferred_type = self.rhs.infer_type(self.entry.scope)
        if (not inferred_type.is_pyobject
                and inferred_type.can_coerce_to_pyobject(self.entry.scope)
                and not inferred_type.is_cyp_class):
            # 'del' is a Python-level operation, so coercible C types are
            # treated as Python objects (but cypclass is kept as-is to
            # avoid a pointless coercion just for a del statement).
            inferred_type = PyrexTypes.py_object_type
        # Bug fix: always record the final type, so the inherited 'type'
        # property does not report a stale/None inferred_type after the
        # py_object coercion path (matches NameAssignment.infer_type).
        self.inferred_type = inferred_type
        return inferred_type
class Uninitialized(object):
    """Sentinel: the name is definitely not initialised yet."""
class Unknown(object):
    """Sentinel: value comes from an outer closure; might be initialised or not."""
class NameReference(object):
    """A read of a tracked name at a given position."""

    def __init__(self, node, entry):
        # Make sure the node carries a cf_state set for the analysis.
        if node.cf_state is None:
            node.cf_state = set()
        self.node = node
        self.entry = entry
        self.pos = node.pos

    def __repr__(self):
        return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
class ControlFlowState(list):
    """Keeps track of a node's entry assignments.

    cf_is_null    [boolean] It is uninitialized
    cf_maybe_null [boolean] May be uninitialized
    is_single     [boolean] Has only one assignment at this point
    """

    cf_maybe_null = False
    cf_is_null = False
    is_single = False

    def __init__(self, state):
        if Uninitialized in state:
            state.discard(Uninitialized)
            self.cf_maybe_null = True
            if not state:
                self.cf_is_null = True
        elif Unknown in state:
            state.discard(Unknown)
            self.cf_maybe_null = True
        elif len(state) == 1:
            self.is_single = True
        # XXX: Remove fake_rhs_expr
        super(ControlFlowState, self).__init__(
            a for a in state if a.rhs is not fake_rhs_expr)

    def one(self):
        return self[0]
class GVContext(object):
"""Graphviz subgraph object."""
def __init__(self):
self.blockids = {}
self.nextid = 0
self.children = []
self.sources = {}
def add(self, child):
self.children.append(child)
def nodeid(self, block):
if block not in self.blockids:
self.blockids[block] = 'block%d' % self.nextid
self.nextid += 1
return self.blockids[block]
def extract_sources(self, block):
if not block.positions:
return ''
start = min(block.positions)
stop = max(block.positions)
srcdescr = start[0]
if srcdescr not in self.sources:
self.sources[srcdescr] = list(srcdescr.get_lines())
lines = self.sources[srcdescr]
return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]])
def render(self, fp, name, annotate_defs=False):
"""Render graphviz dot graph"""
fp.write('digraph %s {\n' % name)
fp.write(' node [shape=box];\n')
for child in self.children:
child.render(fp, self, annotate_defs)
fp.write('}\n')
def escape(self, text):
return text.replace('"', '\\"').replace('\n', '\\n')
class GV(object):
    """Graphviz DOT renderer for one flow graph."""

    def __init__(self, name, flow):
        self.name = name
        self.flow = flow

    def render(self, fp, ctx, annotate_defs=False):
        fp.write(' subgraph %s {\n' % self.name)
        # First pass: one node per block.
        for block in self.flow.blocks:
            label = ctx.extract_sources(block)
            if annotate_defs:
                for stat in block.stats:
                    if isinstance(stat, NameAssignment):
                        label += '\n %s [%s %s]' % (
                            stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1])
                    elif isinstance(stat, NameReference):
                        if stat.entry:
                            label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1])
            pid = ctx.nodeid(block)
            fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label or 'empty')))
        # Second pass: the edges.
        for block in self.flow.blocks:
            pid = ctx.nodeid(block)
            for child in block.children:
                fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child)))
        fp.write(' }\n')
class MessageCollection(object):
    """Collect error/warning messages first, then report them sorted."""

    def __init__(self):
        self.messages = set()

    def error(self, pos, message):
        self.messages.add((pos, True, message))

    def warning(self, pos, message):
        self.messages.add((pos, False, message))

    def report(self):
        # Tuples sort by position first, then severity flag, then text.
        for pos, is_error, message in sorted(self.messages):
            if is_error:
                error(pos, message)
            else:
                warning(pos, message, 2)
def check_definitions(flow, compiler_directives):
    """Run the reaching-definitions analysis on `flow` and report
    uninitialised, maybe-uninitialised and unused names."""
    flow.initialize()
    flow.reaching_definitions()

    # Track down state
    assignments = set()
    # Node to entry map
    references = {}
    assmt_nodes = set()

    # Walk each block, simulating the statement sequence against the
    # block's input bitset to get the per-statement reaching state.
    for block in flow.blocks:
        i_state = block.i_input
        for stat in block.stats:
            i_assmts = flow.assmts[stat.entry]
            state = flow.map_one(i_state, stat.entry)
            if isinstance(stat, NameAssignment):
                stat.lhs.cf_state.update(state)
                assmt_nodes.add(stat.lhs)
                # Kill all previous bits of this entry, then set this
                # statement's own bit (or the entry's "uninitialised"
                # bit for deletions).
                i_state = i_state & ~i_assmts.mask
                if stat.is_deletion:
                    i_state |= i_assmts.bit
                else:
                    i_state |= stat.bit
                assignments.add(stat)
                if stat.rhs is not fake_rhs_expr:
                    stat.entry.cf_assignments.append(stat)
            elif isinstance(stat, NameReference):
                references[stat.node] = stat.entry
                stat.entry.cf_references.append(stat)
                stat.node.cf_state.update(state)
                ## if not stat.node.allow_null:
                ##     i_state &= ~i_assmts.bit
                ## # after successful read, the state is known to be initialised
                state.discard(Uninitialized)
                state.discard(Unknown)
                # Link every reaching assignment to this reference.
                for assmt in state:
                    assmt.refs.add(stat)

    # Check variable usage
    warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized']
    warn_unused_result = compiler_directives['warn.unused_result']
    warn_unused = compiler_directives['warn.unused']
    warn_unused_arg = compiler_directives['warn.unused_arg']

    messages = MessageCollection()

    # assignment hints
    for node in assmt_nodes:
        if Uninitialized in node.cf_state:
            node.cf_maybe_null = True
            if len(node.cf_state) == 1:
                node.cf_is_null = True
            else:
                node.cf_is_null = False
        elif Unknown in node.cf_state:
            node.cf_maybe_null = True
        else:
            node.cf_is_null = False
            node.cf_maybe_null = False

    # Find uninitialized references and cf-hints
    for node, entry in references.items():
        if Uninitialized in node.cf_state:
            node.cf_maybe_null = True
            if not entry.from_closure and len(node.cf_state) == 1:
                node.cf_is_null = True
            if (node.allow_null or entry.from_closure
                    or entry.is_pyclass_attr or entry.type.is_error):
                pass  # Can be uninitialized here
            elif node.cf_is_null:
                # Definitely unbound: error or warning depending on
                # the entry's and the global error settings.
                if entry.error_on_uninitialized or (
                        Options.error_on_uninitialized and (
                        entry.type.is_pyobject or entry.type.is_unspecified)):
                    messages.error(
                        node.pos,
                        "local variable '%s' referenced before assignment"
                        % entry.name)
                else:
                    messages.warning(
                        node.pos,
                        "local variable '%s' referenced before assignment"
                        % entry.name)
            elif warn_maybe_uninitialized:
                messages.warning(
                    node.pos,
                    "local variable '%s' might be referenced before assignment"
                    % entry.name)
        elif Unknown in node.cf_state:
            # TODO: better cross-closure analysis to know when inner functions
            # are being called before a variable is being set, and when
            # a variable is known to be set before even defining the
            # inner function, etc.
            node.cf_maybe_null = True
        else:
            node.cf_is_null = False
            node.cf_maybe_null = False

    # Unused result
    for assmt in assignments:
        if (not assmt.refs and not assmt.entry.is_pyclass_attr
                and not assmt.entry.in_closure):
            if assmt.entry.cf_references and warn_unused_result:
                if assmt.is_arg:
                    messages.warning(assmt.pos, "Unused argument value '%s'" %
                                     assmt.entry.name)
                else:
                    messages.warning(assmt.pos, "Unused result in '%s'" %
                                     assmt.entry.name)
            assmt.lhs.cf_used = False

    # Unused entries
    for entry in flow.entries:
        if (not entry.cf_references
                and not entry.is_pyclass_attr):
            if entry.name != '_' and not entry.name.startswith('unused'):
                # '_' is often used for unused variables, e.g. in loops
                if entry.is_arg:
                    if warn_unused_arg:
                        messages.warning(entry.pos, "Unused argument '%s'" %
                                         entry.name)
                else:
                    if warn_unused:
                        messages.warning(entry.pos, "Unused entry '%s'" %
                                         entry.name)
                entry.cf_used = False

    messages.report()

    # Freeze the raw per-node state sets into ControlFlowState objects.
    for node in assmt_nodes:
        node.cf_state = ControlFlowState(node.cf_state)
    for node in references:
        node.cf_state = ControlFlowState(node.cf_state)
class AssignmentCollector(TreeVisitor):
    """Collect (lhs, rhs) pairs from all assignment statements in a subtree."""

    def __init__(self):
        super(AssignmentCollector, self).__init__()
        self.assignments = []

    def visit_Node(self, node):
        # Bug fix: the dispatcher calls handlers as handler(node), so the
        # old 'visit_Node(self)' signature would raise TypeError if ever
        # invoked — and it recursed into the visitor itself instead of
        # the visited node.
        self._visitchildren(node, None)

    def visit_SingleAssignmentNode(self, node):
        self.assignments.append((node.lhs, node.rhs))

    def visit_CascadedAssignmentNode(self, node):
        for lhs in node.lhs_list:
            self.assignments.append((lhs, node.rhs))
class ControlFlowAnalysis(CythonTransform):
def visit_ModuleNode(self, node):
    # Analyse the module-level code, then optionally dump all collected
    # control-flow graphs as a Graphviz DOT file.
    dot_output = self.current_directives['control_flow.dot_output']
    self.gv_ctx = GVContext() if dot_output else None
    self.constant_folder = ConstantFolding()

    # Set of NameNode reductions
    self.reductions = set()

    self.in_inplace_assignment = False
    self.env_stack = []
    self.env = node.scope
    self.stack = []
    self.flow = ControlFlow()
    self.object_expr = TypedExprNode(PyrexTypes.py_object_type, may_be_none=True)
    self.visitchildren(node)

    check_definitions(self.flow, self.current_directives)

    if dot_output:
        annotate_defs = self.current_directives['control_flow.dot_annotate_defs']
        with open(dot_output, 'wt') as fp:
            self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
    return node
def visit_FuncDefNode(self, node):
    # Argument defaults are evaluated in the enclosing scope, before
    # entering the function's own flow graph.
    for arg in node.args:
        if arg.default:
            self.visitchildren(arg)
    self.visitchildren(node, ('decorators',))

    # Push the enclosing environment/flow and start a fresh graph.
    self.env_stack.append(self.env)
    self.env = node.local_scope
    self.stack.append(self.flow)
    self.flow = ControlFlow()

    # Collect all entries
    for entry in node.local_scope.entries.values():
        if self.flow.is_tracked(entry):
            self.flow.entries.add(entry)

    self.mark_position(node)
    # Function body block
    self.flow.nextblock()

    for arg in node.args:
        self._visit(arg)
    if node.star_arg:
        # *args is bound to a tuple and is never None.
        self.flow.mark_argument(node.star_arg,
                                TypedExprNode(Builtin.tuple_type,
                                              may_be_none=False),
                                node.star_arg.entry)
    if node.starstar_arg:
        # **kwargs is bound to a dict and is never None.
        self.flow.mark_argument(node.starstar_arg,
                                TypedExprNode(Builtin.dict_type,
                                              may_be_none=False),
                                node.starstar_arg.entry)
    self._visit(node.body)
    # Workaround for generators
    if node.is_generator:
        self._visit(node.gbody.body)

    # Exit point
    if self.flow.block:
        self.flow.block.add_child(self.flow.exit_point)

    # Cleanup graph
    self.flow.normalize()
    check_definitions(self.flow, self.current_directives)
    self.flow.blocks.add(self.flow.entry_point)

    if self.gv_ctx is not None:
        self.gv_ctx.add(GV(node.local_scope.name, self.flow))

    # Restore the enclosing flow graph and environment.
    self.flow = self.stack.pop()
    self.env = self.env_stack.pop()
    return node
def visit_DefNode(self, node):
    # Mark the 'def' function itself as used, then analyse it like any
    # other function.
    node.used = True
    return self.visit_FuncDefNode(node)
def visit_GeneratorBodyDefNode(self, node):
    # The generator body is analysed via its owning function node
    # (see the generator workaround in visit_FuncDefNode).
    return node
def visit_CTypeDefNode(self, node):
    # ctypedefs carry no runtime control flow; nothing to analyse.
    return node
def mark_assignment(self, lhs, rhs=None):
    # Record an assignment to `lhs` (rhs defaults to a generic Python
    # object expression) and split the block around possible exceptions.
    if not self.flow.block:
        return
    if self.flow.exceptions:
        # The assignment may raise: add an edge into the innermost
        # exception handler and continue in a fresh block.
        exc_descr = self.flow.exceptions[-1]
        self.flow.block.add_child(exc_descr.entry_point)
        self.flow.nextblock()

    if not rhs:
        rhs = self.object_expr
    if lhs.is_name:
        if lhs.entry is not None:
            entry = lhs.entry
        else:
            entry = self.env.lookup(lhs.name)
        if entry is None:  # TODO: This shouldn't happen...
            return
        self.flow.mark_assignment(lhs, rhs, entry)
    elif lhs.is_sequence_constructor:
        # Unpacking assignment: recurse into each target.
        for i, arg in enumerate(lhs.args):
            if arg.is_starred:
                # "a, *b = x" assigns a list to "b"
                item_node = TypedExprNode(Builtin.list_type, may_be_none=False, pos=arg.pos)
            elif rhs is self.object_expr:
                item_node = rhs
            else:
                item_node = rhs.inferable_item_node(i)
            self.mark_assignment(arg, item_node)
    else:
        self._visit(lhs)

    if self.flow.exceptions:
        exc_descr = self.flow.exceptions[-1]
        self.flow.block.add_child(exc_descr.entry_point)
        self.flow.nextblock()
def mark_position(self, node):
    """Mark position if DOT output is enabled."""
    # Positions are only needed to label the Graphviz nodes.
    if self.current_directives['control_flow.dot_output']:
        self.flow.mark_position(node)
def visit_FromImportStatNode(self, node):
    # Every imported name (except a '*' import) assigns to its target.
    for name, target in node.items:
        if name == "*":
            continue
        self.mark_assignment(target)
    self.visitchildren(node)
    return node
def visit_AssignmentNode(self, node):
    # Abstract base case: every concrete assignment node type must have
    # its own handler; reaching this indicates a compiler bug.
    raise InternalError("Unhandled assignment node %s" % type(node))
def visit_SingleAssignmentNode(self, node):
    # 'a = rhs': evaluate the rhs first, then record the assignment.
    self._visit(node.rhs)
    self.mark_assignment(node.lhs, node.rhs)
    return node
def visit_CascadedAssignmentNode(self, node):
    """'a = b = c = rhs': the rhs is evaluated once, then assigned to
    every target in turn."""
    self._visit(node.rhs)
    for target in node.lhs_list:
        self.mark_assignment(target, node.rhs)
    return node
def visit_ParallelAssignmentNode(self, node):
    """'a, b = x, y': all right-hand sides are evaluated before any
    target is assigned, so visit them in two separate passes."""
    collector = AssignmentCollector()
    collector.visitchildren(node)
    assignments = collector.assignments
    for _, rhs in assignments:
        self._visit(rhs)
    for lhs, rhs in assignments:
        self.mark_assignment(lhs, rhs)
    return node
def visit_InPlaceAssignmentNode(self, node):
    # 'x += y': visit children with the in-place flag raised so that
    # reading a prange reduction variable is permitted here (see
    # visit_NameNode), then record the assignment of the equivalent
    # folded binary operation.
    self.in_inplace_assignment = True
    self.visitchildren(node)
    self.in_inplace_assignment = False
    self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node()))
    return node
def visit_DelStatNode(self, node):
    """'del a, b': record a deletion for each name target; non-name
    targets are just visited as expressions."""
    for arg in node.args:
        if arg.is_name:
            # NOTE(review): entry may be None if the lookup fails —
            # presumably earlier analysis guarantees it exists; confirm.
            entry = arg.entry or self.env.lookup(arg.name)
            if entry.in_closure or entry.from_closure:
                error(arg.pos,
                      "can not delete variable '%s' "
                      "referenced in nested scope" % entry.name)
            if not node.ignore_nonexisting:
                self._visit(arg)  # mark reference
            self.flow.mark_deletion(arg, entry)
        else:
            self._visit(arg)
    return node
def visit_CArgDeclNode(self, node):
    """Function argument declaration: on function entry the argument is
    assigned a value of its declared type."""
    entry = self.env.lookup(node.name)
    if entry:
        # An argument may be None unless it was declared 'not None'.
        incoming_value = TypedExprNode(entry.type, not node.not_none)
        self.flow.mark_argument(node, incoming_value, entry)
    return node
def visit_NameNode(self, node):
    """Record a read of a name.  Reading a prange reduction variable in
    the loop body (outside an in-place assignment) is an error."""
    if not self.flow.block:
        # Unreachable code: nothing to record.
        return node
    entry = node.entry or self.env.lookup(node.name)
    if entry:
        self.flow.mark_reference(node, entry)
        if entry in self.reductions and not self.in_inplace_assignment:
            error(node.pos,
                  "Cannot read reduction variable in loop body")
    return node
def visit_StatListNode(self, node):
    # Visit statements in order; once a statement terminates the current
    # block (return/raise/break/...), mark it as a terminator and skip
    # the remaining (unreachable) statements.
    if self.flow.block:
        for stat in node.stats:
            self._visit(stat)
            if not self.flow.block:
                stat.is_terminator = True
                break
    return node
def visit_Node(self, node):
    # Default handler: recurse into children and record the node's
    # position (for DOT output).
    self.visitchildren(node)
    self.mark_position(node)
    return node
def visit_SizeofVarNode(self, node):
    # The operand of sizeof() is not visited, so no reference is recorded.
    return node

def visit_TypeidNode(self, node):
    # The operand of typeid() is not visited, so no reference is recorded.
    return node
def visit_IfStatNode(self, node):
    """Build the CFG diamond for an if/elif/.../else statement."""
    next_block = self.flow.newblock()  # join point after the statement
    parent = self.flow.block
    # If clauses: each condition gets its own block, chained off the
    # previous condition's block; each body flows into the join point.
    for clause in node.if_clauses:
        parent = self.flow.nextblock(parent)
        self._visit(clause.condition)
        self.flow.nextblock()
        self._visit(clause.body)
        if self.flow.block:
            self.flow.block.add_child(next_block)
    # Else clause
    if node.else_clause:
        self.flow.nextblock(parent=parent)
        self._visit(node.else_clause)
        if self.flow.block:
            self.flow.block.add_child(next_block)
    else:
        # No else: the last condition can fall through to the join point.
        parent.add_child(next_block)
    if next_block.parents:
        self.flow.block = next_block
    else:
        # Every branch terminated: code after the 'if' is unreachable.
        self.flow.block = None
    return node
def visit_AssertStatNode(self, node):
    """Essentially an if-condition that wraps a RaiseStatNode.
    """
    self.mark_position(node)
    next_block = self.flow.newblock()  # join point after the assert
    parent = self.flow.block
    # failure case
    parent = self.flow.nextblock(parent)
    self._visit(node.condition)
    self.flow.nextblock()
    self._visit(node.exception)
    if self.flow.block:
        self.flow.block.add_child(next_block)
    # success case: the condition block falls through to the join point
    parent.add_child(next_block)
    if next_block.parents:
        self.flow.block = next_block
    else:
        self.flow.block = None
    return node
def visit_WhileStatNode(self, node):
    """Build the CFG loop for a while statement (condition, body,
    optional else clause)."""
    condition_block = self.flow.nextblock()
    next_block = self.flow.newblock()  # block following the loop
    # Condition block; register the loop for break/continue targets.
    self.flow.loops.append(LoopDescr(next_block, condition_block))
    if node.condition:
        self._visit(node.condition)
    # Body block
    self.flow.nextblock()
    self._visit(node.body)
    self.flow.loops.pop()
    # Loop it: the body end flows back to the condition and may also
    # leave the loop.
    if self.flow.block:
        self.flow.block.add_child(condition_block)
        self.flow.block.add_child(next_block)
    # Else clause: entered from the condition block when the loop ends
    # normally.
    if node.else_clause:
        self.flow.nextblock(parent=condition_block)
        self._visit(node.else_clause)
        if self.flow.block:
            self.flow.block.add_child(next_block)
    else:
        condition_block.add_child(next_block)
    if next_block.parents:
        self.flow.block = next_block
    else:
        # No path reaches past the loop (e.g. infinite loop without break).
        self.flow.block = None
    return node
def mark_forloop_target(self, node):
    """Record the assignment(s) a for-in loop makes to its target,
    special-casing reversed()/enumerate()/range() calls so that the
    target's type can be inferred more precisely."""
    # TODO: Remove redundancy with range optimization...
    is_special = False
    sequence = node.iterator.sequence
    target = node.target
    if isinstance(sequence, ExprNodes.SimpleCallNode):
        function = sequence.function
        if sequence.self is None and function.is_name:
            entry = self.env.lookup(function.name)
            if not entry or entry.is_builtin:
                if function.name == 'reversed' and len(sequence.args) == 1:
                    # reversed(seq): iterate over the underlying sequence.
                    sequence = sequence.args[0]
                elif function.name == 'enumerate' and len(sequence.args) == 1:
                    if target.is_sequence_constructor and len(target.args) == 2:
                        iterator = sequence.args[0]
                        if iterator.is_name:
                            iterator_type = iterator.infer_type(self.env)
                            if iterator_type.is_builtin_type:
                                # assume that builtin types have a length within Py_ssize_t
                                self.mark_assignment(
                                    target.args[0],
                                    ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                                      type=PyrexTypes.c_py_ssize_t_type))
                                # Continue with the value part of the
                                # (index, value) target pair.
                                target = target.args[1]
                                sequence = sequence.args[0]
    if isinstance(sequence, ExprNodes.SimpleCallNode):
        function = sequence.function
        if sequence.self is None and function.is_name:
            entry = self.env.lookup(function.name)
            if not entry or entry.is_builtin:
                if function.name in ('range', 'xrange'):
                    is_special = True
                    # The target takes the start/stop values directly...
                    for arg in sequence.args[:2]:
                        self.mark_assignment(target, arg)
                    # ...and, with an explicit step, start+step as well.
                    if len(sequence.args) > 2:
                        self.mark_assignment(target, self.constant_folder(
                            ExprNodes.binop_node(node.pos,
                                                 '+',
                                                 sequence.args[0],
                                                 sequence.args[2])))
    if not is_special:
        # A for-loop basically translates to subsequent calls to
        # __getitem__(), so using an IndexNode here allows us to
        # naturally infer the base type of pointers, C arrays,
        # Python strings, etc., while correctly falling back to an
        # object type when the base type cannot be handled.
        self.mark_assignment(target, node.item)
def visit_AsyncForStatNode(self, node):
    # 'async for' builds the same CFG shape as a plain for-in loop.
    return self.visit_ForInStatNode(node)
def visit_ForInStatNode(self, node):
    """Build the CFG loop for for-in style loops; also handles
    AsyncForStatNode and the parallel (prange) loop nodes."""
    condition_block = self.flow.nextblock()
    next_block = self.flow.newblock()
    # Condition with iterator
    self.flow.loops.append(LoopDescr(next_block, condition_block))
    self._visit(node.iterator)
    # Target assignment
    self.flow.nextblock()
    if isinstance(node, Nodes.ForInStatNode):
        self.mark_forloop_target(node)
    elif isinstance(node, Nodes.AsyncForStatNode):
        # not entirely correct, but good enough for now
        self.mark_assignment(node.target, node.item)
    else:  # Parallel
        self.mark_assignment(node.target)
    # Body block
    if isinstance(node, Nodes.ParallelRangeNode):
        # In case of an invalid
        self._delete_privates(node, exclude=node.target.entry)
    self.flow.nextblock()
    self._visit(node.body)
    self.flow.loops.pop()
    # Loop it: body end flows back to the condition block.
    if self.flow.block:
        self.flow.block.add_child(condition_block)
    # Else clause: entered from the condition block on normal loop exit.
    if node.else_clause:
        self.flow.nextblock(parent=condition_block)
        self._visit(node.else_clause)
        if self.flow.block:
            self.flow.block.add_child(next_block)
    else:
        condition_block.add_child(next_block)
    if next_block.parents:
        self.flow.block = next_block
    else:
        self.flow.block = None
    return node
def _delete_privates(self, node, exclude=None):
    """Mark every privatized variable of a parallel block as deleted
    (i.e. uninitialized), optionally keeping one entry (the loop target)."""
    for private in node.assigned_nodes:
        if exclude and private.entry is exclude:
            continue
        self.flow.mark_deletion(private, private.entry)
def visit_ParallelRangeNode(self, node):
    """prange() loop: collect reduction variables, then treat it like a
    for-in loop; the reduction set is restored afterwards."""
    reductions = self.reductions
    # if node.target is None or not a NameNode, an error will have
    # been previously issued
    if hasattr(node.target, 'entry'):
        # Work on a copy so nested pranges do not leak reductions.
        self.reductions = set(reductions)
        for private_node in node.assigned_nodes:
            private_node.entry.error_on_uninitialized = True
            pos, reduction = node.assignments[private_node.entry]
            if reduction:
                self.reductions.add(private_node.entry)
        node = self.visit_ForInStatNode(node)
    self.reductions = reductions
    return node
def visit_ParallelWithBlockNode(self, node):
    """'with parallel():' block: privatized variables are uninitialized
    both on entry to and on exit from the block."""
    for private_node in node.assigned_nodes:
        private_node.entry.error_on_uninitialized = True
    self._delete_privates(node)
    self.visitchildren(node)
    self._delete_privates(node)
    return node
def visit_ForFromStatNode(self, node):
    """Build the CFG loop for 'for i from a <= i < b [by step]' loops."""
    condition_block = self.flow.nextblock()
    next_block = self.flow.newblock()
    # Condition with iterator
    self.flow.loops.append(LoopDescr(next_block, condition_block))
    self._visit(node.bound1)
    self._visit(node.bound2)
    if node.step is not None:
        self._visit(node.step)
    # Target assignment: the target takes the start bound...
    self.flow.nextblock()
    self.mark_assignment(node.target, node.bound1)
    # ...and, with an explicit step, start+step as well (for inference).
    if node.step is not None:
        self.mark_assignment(node.target, self.constant_folder(
            ExprNodes.binop_node(node.pos, '+', node.bound1, node.step)))
    # Body block
    self.flow.nextblock()
    self._visit(node.body)
    self.flow.loops.pop()
    # Loop it: body end flows back to the condition block.
    if self.flow.block:
        self.flow.block.add_child(condition_block)
    # Else clause
    if node.else_clause:
        self.flow.nextblock(parent=condition_block)
        self._visit(node.else_clause)
        if self.flow.block:
            self.flow.block.add_child(next_block)
    else:
        condition_block.add_child(next_block)
    if next_block.parents:
        self.flow.block = next_block
    else:
        self.flow.block = None
    return node
def visit_LoopNode(self, node):
    # Only the concrete loop nodes (while / for-in / for-from) are handled;
    # hitting the generic base class is a compiler bug.
    raise InternalError("Generic loops are not supported")
def visit_WithTargetAssignmentStatNode(self, node):
    # 'with mgr as x': x is assigned the result of the enter call.
    self.mark_assignment(node.lhs, node.with_node.enter_call)
    return node
def visit_WithStatNode(self, node):
    # Visit the manager expression, the enter call and the body; the
    # target assignment is handled by WithTargetAssignmentStatNode and
    # no other children are visited here.
    self._visit(node.manager)
    self._visit(node.enter_call)
    self._visit(node.body)
    return node
def visit_TryExceptStatNode(self, node):
    """Build the CFG for try/except/else: the body may jump to the
    exception entry point, and each except clause chains to the next
    (or to an outer handler) when its pattern does not match."""
    # After exception handling
    next_block = self.flow.newblock()
    # Body block
    self.flow.newblock()
    # Exception entry point
    entry_point = self.flow.newblock()
    self.flow.exceptions.append(ExceptionDescr(entry_point))
    self.flow.nextblock()
    ## XXX: links to exception handling point should be added by
    ## XXX: children nodes
    self.flow.block.add_child(entry_point)
    self.flow.nextblock()
    self._visit(node.body)
    self.flow.exceptions.pop()
    # After exception: normal fall-through goes via the else clause (if
    # any) to the join point.
    if self.flow.block:
        if node.else_clause:
            self.flow.nextblock()
            self._visit(node.else_clause)
        if self.flow.block:
            self.flow.block.add_child(next_block)
    for clause in node.except_clauses:
        # Each clause starts at the current exception entry point; a
        # non-matching pattern falls through to the next entry point.
        self.flow.block = entry_point
        if clause.pattern:
            for pattern in clause.pattern:
                self._visit(pattern)
        else:
            # TODO: handle * pattern
            pass
        entry_point = self.flow.newblock(parent=self.flow.block)
        self.flow.nextblock()
        if clause.target:
            self.mark_assignment(clause.target)
        self._visit(clause.body)
        if self.flow.block:
            self.flow.block.add_child(next_block)
    if self.flow.exceptions:
        # Unmatched exceptions propagate to the enclosing handler.
        entry_point.add_child(self.flow.exceptions[-1].entry_point)
    if next_block.parents:
        self.flow.block = next_block
    else:
        self.flow.block = None
    return node
def visit_TryFinallyStatNode(self, node):
    """Build the CFG for try/finally: the finally clause is reachable
    both on the exception path (via its own entry point) and on normal
    fall-through from the body."""
    body_block = self.flow.nextblock()
    # Exception entry point: runs the finally-on-exception clause first.
    entry_point = self.flow.newblock()
    self.flow.block = entry_point
    self._visit(node.finally_except_clause)
    if self.flow.block and self.flow.exceptions:
        # Re-raise path continues to the enclosing handler.
        self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
    # Normal execution
    finally_enter = self.flow.newblock()
    self.flow.block = finally_enter
    self._visit(node.finally_clause)
    finally_exit = self.flow.block
    descr = ExceptionDescr(entry_point, finally_enter, finally_exit)
    self.flow.exceptions.append(descr)
    if self.flow.loops:
        # break/continue inside the body must run the finally clause.
        self.flow.loops[-1].exceptions.append(descr)
    self.flow.block = body_block
    body_block.add_child(entry_point)
    self.flow.nextblock()
    self._visit(node.body)
    self.flow.exceptions.pop()
    if self.flow.loops:
        self.flow.loops[-1].exceptions.pop()
    if self.flow.block:
        # Normal fall-through: run the finally clause, then continue
        # after its exit block (if the clause itself can fall through).
        self.flow.block.add_child(finally_enter)
        if finally_exit:
            self.flow.block = self.flow.nextblock(parent=finally_exit)
        else:
            self.flow.block = None
    return node
def visit_RaiseStatNode(self, node):
    # 'raise' terminates the current block, jumping to the innermost
    # exception handler if there is one.
    self.mark_position(node)
    self.visitchildren(node)
    if self.flow.exceptions:
        self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
    self.flow.block = None
    return node
def visit_ReraiseStatNode(self, node):
    # Bare 'raise': same control flow as an explicit raise, but with no
    # child expressions to visit.
    self.mark_position(node)
    if self.flow.exceptions:
        self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
    self.flow.block = None
    return node
def visit_ReturnStatNode(self, node):
    """'return': runs the innermost enclosing finally clause (if any),
    whose exit then leads either to the next outer finally clause or to
    the function exit point."""
    self.mark_position(node)
    self.visitchildren(node)
    # Innermost handlers first; the iterator is shared with the inner
    # loop below so it continues scanning outwards from the match.
    outer_exception_handlers = iter(self.flow.exceptions[::-1])
    for handler in outer_exception_handlers:
        if handler.finally_enter:
            self.flow.block.add_child(handler.finally_enter)
            if handler.finally_exit:
                # 'return' goes to function exit, or to the next outer 'finally' clause
                exit_point = self.flow.exit_point
                for next_handler in outer_exception_handlers:
                    if next_handler.finally_enter:
                        exit_point = next_handler.finally_enter
                        break
                handler.finally_exit.add_child(exit_point)
            break
    else:
        # No finally clause on the way out: jump straight to the exit.
        if self.flow.block:
            self.flow.block.add_child(self.flow.exit_point)
    self.flow.block = None
    return node
def visit_BreakStatNode(self, node):
    """'break': jump to the loop's after-block, running the innermost
    enclosing finally clause within the loop first (if any)."""
    if not self.flow.loops:
        #error(node.pos, "break statement not inside loop")
        return node
    loop = self.flow.loops[-1]
    self.mark_position(node)
    for exception in loop.exceptions[::-1]:
        if exception.finally_enter:
            self.flow.block.add_child(exception.finally_enter)
            if exception.finally_exit:
                exception.finally_exit.add_child(loop.next_block)
            break
    else:
        # No intervening finally: jump straight out of the loop.
        self.flow.block.add_child(loop.next_block)
    self.flow.block = None
    return node
def visit_ContinueStatNode(self, node):
    """'continue': jump back to the loop's condition block, running the
    innermost enclosing finally clause within the loop first (if any)."""
    if not self.flow.loops:
        #error(node.pos, "continue statement not inside loop")
        return node
    loop = self.flow.loops[-1]
    self.mark_position(node)
    for exception in loop.exceptions[::-1]:
        if exception.finally_enter:
            self.flow.block.add_child(exception.finally_enter)
            if exception.finally_exit:
                exception.finally_exit.add_child(loop.loop_block)
            break
    else:
        # No intervening finally: jump straight back to the loop head.
        self.flow.block.add_child(loop.loop_block)
    self.flow.block = None
    return node
def visit_ComprehensionNode(self, node):
    """Visit a comprehension loop inside its own expression scope (if it
    has one); the append node itself is deliberately skipped."""
    own_scope = node.expr_scope
    if own_scope:
        self.env_stack.append(self.env)
        self.env = own_scope
    # Skip append node here
    self._visit(node.loop)
    if own_scope:
        self.env = self.env_stack.pop()
    return node
def visit_ScopedExprNode(self, node):
    """Visit a scoped expression's children inside its own expression
    scope, when it has one."""
    own_scope = node.expr_scope
    if own_scope:
        self.env_stack.append(self.env)
        self.env = own_scope
    self.visitchildren(node)
    if own_scope:
        self.env = self.env_stack.pop()
    return node
def visit_PyClassDefNode(self, node):
    """Python class definition: the class machinery is evaluated in the
    outer scope, the class body in the class scope, and the class name
    is assigned the resulting class object."""
    # These children are evaluated in the enclosing scope.
    self.visitchildren(node, ('dict', 'metaclass',
                              'mkw', 'bases', 'class_result'))
    self.flow.mark_assignment(node.target, node.classobj,
                              self.env.lookup(node.target.name))
    self.env_stack.append(self.env)
    self.env = node.scope
    self.flow.nextblock()
    if node.doc_node:
        # __doc__ is implicitly assigned inside the class body.
        self.flow.mark_assignment(node.doc_node, fake_rhs_expr, node.doc_node.entry)
    self.visitchildren(node, ('body',))
    self.flow.nextblock()
    self.env = self.env_stack.pop()
    return node
def visit_AmpersandNode(self, node):
    """'&x': taking the address of a name counts as a fake write so the
    variable does not get flagged as unused/uninitialized."""
    operand = node.operand
    if operand.is_name:
        # Fake assignment to silence warning
        self.mark_assignment(operand, fake_rhs_expr)
    self.visitchildren(node)
    return node
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/FusedNode.py 0000664 0000000 0000000 00000116044 13766135517 0030525 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import
import copy
from . import (ExprNodes, PyrexTypes, MemoryView,
ParseTreeTransforms, StringEncoding, Errors)
from .ExprNodes import CloneNode, ProxyNode, TupleNode
from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode
from ..Utils import OrderedSet
class FusedCFuncDefNode(StatListNode):
"""
This node replaces a function with fused arguments. It deep-copies the
function for every permutation of fused types, and allocates a new local
scope for it. It keeps track of the original function in self.node, and
the entry of the original function in the symbol table is given the
'fused_cfunction' attribute which points back to us.
Then when a function lookup occurs (to e.g. call it), the call can be
dispatched to the right function.
node FuncDefNode the original function
nodes [FuncDefNode] list of copies of node with different specific types
py_func DefNode the fused python function subscriptable from
Python space
__signatures__ A DictNode mapping signature specialization strings
to PyCFunction nodes
resulting_fused_function PyCFunction for the fused DefNode that delegates
to specializations
fused_func_assignment Assignment of the fused function to the function name
defaults_tuple TupleNode of defaults (letting PyCFunctionNode build
defaults would result in many different tuples)
specialized_pycfuncs List of synthesized pycfunction nodes for the
specializations
code_object CodeObjectNode shared by all specializations and the
fused function
fused_compound_types All fused (compound) types (e.g. floating[:])
"""
# Defaults for the attributes described in the class docstring; they are
# filled in during analysis.
__signatures__ = None
resulting_fused_function = None
fused_func_assignment = None
defaults_tuple = None
decorators = None

# Also traverse the synthesized dispatch machinery as child nodes.
child_attrs = StatListNode.child_attrs + [
    '__signatures__', 'resulting_fused_function', 'fused_func_assignment']
def __init__(self, node, env):
    """Create all specializations of ``node`` (a def or c(p)def function
    with fused arguments) in scope ``env`` and collect them in
    self.nodes / self.stats."""
    super(FusedCFuncDefNode, self).__init__(node.pos)
    self.nodes = []
    self.node = node
    is_def = isinstance(self.node, DefNode)
    if is_def:
        # self.node.decorators = []
        self.copy_def(env)
    else:
        self.copy_cdef(env)
    # Perform some sanity checks. If anything fails, it's a bug
    for n in self.nodes:
        assert not n.entry.type.is_fused
        assert not n.local_scope.return_type.is_fused
        if node.return_type.is_fused:
            assert not n.return_type.is_fused
        if not is_def and n.cfunc_declarator.optional_arg_count:
            assert n.type.op_arg_struct
    node.entry.fused_cfunction = self
    # Copy the nodes as AnalyseDeclarationsTransform will prepend
    # self.py_func to self.stats, as we only want specialized
    # CFuncDefNodes in self.nodes
    self.stats = self.nodes[:]
def copy_def(self, env):
    """
    Create a copy of the original def or lambda function for specialized
    versions.
    """
    fused_compound_types = PyrexTypes.unique(
        [arg.type for arg in self.node.args if arg.type.is_fused])
    fused_types = self._get_fused_base_types(fused_compound_types)
    permutations = PyrexTypes.get_all_specialized_permutations(fused_types)
    self.fused_compound_types = fused_compound_types
    # The original (fused) function is replaced by the dispatcher, so it
    # must not be exposed as a Python function itself.
    if self.node.entry in env.pyfunc_entries:
        env.pyfunc_entries.remove(self.node.entry)
    for cname, fused_to_specific in permutations:
        copied_node = copy.deepcopy(self.node)
        # keep signature object identity for special casing in DefNode.analyse_declarations()
        copied_node.entry.signature = self.node.entry.signature
        self._specialize_function_args(copied_node.args, fused_to_specific)
        copied_node.return_type = self.node.return_type.specialize(
            fused_to_specific)
        copied_node.analyse_declarations(env)
        # copied_node.is_staticmethod = self.node.is_staticmethod
        # copied_node.is_classmethod = self.node.is_classmethod
        self.create_new_local_scope(copied_node, env, fused_to_specific)
        self.specialize_copied_def(copied_node, cname, self.node.entry,
                                   fused_to_specific, fused_compound_types)
        PyrexTypes.specialize_entry(copied_node.entry, cname)
        copied_node.entry.used = True
        env.entries[copied_node.entry.name] = copied_node.entry
        # Stop early on errors to avoid repeating them per specialization.
        if not self.replace_fused_typechecks(copied_node):
            break
    self.orig_py_func = self.node
    self.py_func = self.make_fused_cpdef(self.node, env, is_def=True)
def copy_cdef(self, env):
    """
    Create a copy of the original c(p)def function for all specialized
    versions.
    """
    permutations = self.node.type.get_all_specialized_permutations()
    # print 'Node %s has %d specializations:' % (self.node.entry.name,
    #                                            len(permutations))
    # import pprint; pprint.pprint([d for cname, d in permutations])
    # Prevent copying of the python function
    self.orig_py_func = orig_py_func = self.node.py_func
    self.node.py_func = None
    if orig_py_func:
        env.pyfunc_entries.remove(orig_py_func.entry)
    fused_types = self.node.type.get_fused_types()
    self.fused_compound_types = fused_types
    new_cfunc_entries = []
    for cname, fused_to_specific in permutations:
        copied_node = copy.deepcopy(self.node)
        # Make the types in our CFuncType specific.
        type = copied_node.type.specialize(fused_to_specific)
        entry = copied_node.entry
        type.specialize_entry(entry, cname)
        # Reuse existing Entries (e.g. from .pxd files).
        for i, orig_entry in enumerate(env.cfunc_entries):
            if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
                copied_node.entry = env.cfunc_entries[i]
                if not copied_node.entry.func_cname:
                    copied_node.entry.func_cname = entry.func_cname
                entry = copied_node.entry
                type = entry.type
                break
        else:
            # No pre-declared entry matched: this is a new specialization.
            new_cfunc_entries.append(entry)
        copied_node.type = type
        entry.type, type.entry = type, entry
        entry.used = (entry.used or
                      self.node.entry.defined_in_pxd or
                      env.is_c_class_scope or
                      entry.is_cmethod)
        if self.node.cfunc_declarator.optional_arg_count:
            self.node.cfunc_declarator.declare_optional_arg_struct(
                type, env, fused_cname=cname)
        copied_node.return_type = type.return_type
        self.create_new_local_scope(copied_node, env, fused_to_specific)
        # Make the argument types in the CFuncDeclarator specific
        self._specialize_function_args(copied_node.cfunc_declarator.args,
                                       fused_to_specific)
        # If a cpdef, declare all specialized cpdefs (this
        # also calls analyse_declarations)
        copied_node.declare_cpdef_wrapper(env)
        if copied_node.py_func:
            env.pyfunc_entries.remove(copied_node.py_func.entry)
            self.specialize_copied_def(
                copied_node.py_func, cname, self.node.entry.as_variable,
                fused_to_specific, fused_types)
        # Stop early on errors to avoid repeating them per specialization.
        if not self.replace_fused_typechecks(copied_node):
            break
    # replace old entry with new entries
    try:
        cindex = env.cfunc_entries.index(self.node.entry)
    except ValueError:
        env.cfunc_entries.extend(new_cfunc_entries)
    else:
        env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
    if orig_py_func:
        self.py_func = self.make_fused_cpdef(orig_py_func, env,
                                             is_def=False)
    else:
        self.py_func = orig_py_func
def _get_fused_base_types(self, fused_compound_types):
    """Return the unique basic fused types contained in a list of
    (possibly compound, e.g. 'floating[:]') fused types."""
    seen = set()
    base_types = []
    # get_fused_types() appends into base_types, using 'seen' for dedup.
    for compound_type in fused_compound_types:
        compound_type.get_fused_types(result=base_types, seen=seen)
    return base_types
def _specialize_function_args(self, args, fused_to_specific):
    """Replace each fused argument type by its specific type in place."""
    for arg in args:
        if arg.type.is_fused:
            arg.type = arg.type.specialize(fused_to_specific)
            if arg.type.is_memoryviewslice:
                arg.type.validate_memslice_dtype(arg.pos)
            if arg.annotation:
                # TODO might be nice if annotations were specialized instead?
                # (Or might be hard to do reliably)
                arg.annotation.untyped = True
def create_new_local_scope(self, node, env, f2s):
    """
    Create a new local scope for the copied node and append it to
    self.nodes. A new local scope is needed because the arguments with the
    fused types are already in the local scope, and we need the specialized
    entries created after analyse_declarations on each specialized version
    of the (CFunc)DefNode.
    f2s is a dict mapping each fused type to its specialized version
    """
    node.create_local_scope(env)
    node.local_scope.fused_to_specific = f2s
    # This is copied from the original function, set it to false to
    # stop recursion
    node.has_fused_arguments = False
    self.nodes.append(node)
def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types):
    """Specialize the copy of a DefNode given the copied node,
    the specialization cname and the original DefNode entry"""
    fused_types = self._get_fused_base_types(fused_compound_types)
    # e.g. "long|float" — the key used for runtime dispatch lookup.
    type_strings = [
        PyrexTypes.specialization_signature_string(fused_type, f2s)
        for fused_type in fused_types
    ]
    node.specialized_signature_string = '|'.join(type_strings)
    node.entry.pymethdef_cname = PyrexTypes.get_fused_cname(
        cname, node.entry.pymethdef_cname)
    # The specialization shares the original function's docstring.
    node.entry.doc = py_entry.doc
    node.entry.doc_cname = py_entry.doc_cname
def replace_fused_typechecks(self, copied_node):
    """
    Branch-prune fused type checks like

        if fused_t is int:
            ...

    Returns False when the pruning transform issued new errors (so the
    caller should stop specializing and prevent a flood of errors),
    True otherwise.
    """
    errors_before = Errors.num_errors
    transform = ParseTreeTransforms.ReplaceFusedTypeChecks(
        copied_node.local_scope)
    transform(copied_node)
    # The global error counter only ever grows, so equality means
    # "no new errors were issued".
    return Errors.num_errors == errors_before
def _fused_instance_checks(self, normal_types, pyx_code, env):
    """
    Generate Cython code for instance checks, matching an object to
    specialized types.
    """
    for specialized_type in normal_types:
        # all_numeric = all_numeric and specialized_type.is_numeric
        pyx_code.context.update(
            py_type_name=specialized_type.py_type_name(),
            specialized_type_name=specialized_type.specialization_string,
        )
        # First matching isinstance() wins and ends the dispatch loop.
        pyx_code.put_chunk(
            u"""
                if isinstance(arg, {{py_type_name}}):
                    dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break
            """)
def _dtype_name(self, dtype):
    """Identifier-safe name for a dtype: a '___pyx_' prefixed name for
    typedefs, otherwise the type name with spaces replaced."""
    return ('___pyx_%s' % dtype) if dtype.is_typedef \
        else str(dtype).replace(' ', '_')

def _dtype_type(self, dtype):
    """Type string used in generated code: the generated typedef name
    for typedefs, the plain type name otherwise."""
    return self._dtype_name(dtype) if dtype.is_typedef else str(dtype)
def _sizeof_dtype(self, dtype):
    """Cython sizeof expression for a dtype; Python objects are held as
    pointers in buffers."""
    if dtype.is_pyobject:
        return 'sizeof(void *)'
    return "sizeof(%s)" % self._dtype_type(dtype)
def _buffer_check_numpy_dtype_setup_cases(self, pyx_code):
    "Setup some common cases to match dtypes against specializations"
    # One insertion point per numpy kind character; the per-dtype checks
    # are inserted later by _buffer_check_numpy_dtype().
    if pyx_code.indenter("if kind in b'iu':"):
        pyx_code.putln("pass")
        pyx_code.named_insertion_point("dtype_int")
        pyx_code.dedent()
    if pyx_code.indenter("elif kind == b'f':"):
        pyx_code.putln("pass")
        pyx_code.named_insertion_point("dtype_float")
        pyx_code.dedent()
    if pyx_code.indenter("elif kind == b'c':"):
        pyx_code.putln("pass")
        pyx_code.named_insertion_point("dtype_complex")
        pyx_code.dedent()
    if pyx_code.indenter("elif kind == b'O':"):
        pyx_code.putln("pass")
        pyx_code.named_insertion_point("dtype_object")
        pyx_code.dedent()

# Template statements recording the dispatch decision for the argument at
# {{dest_sig_idx}}: 'match' stores the specialized signature string,
# 'no_match' stores None.
match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
no_match = "dest_sig[{{dest_sig_idx}}] = None"
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
    """
    Match a numpy dtype object to the individual specializations.
    """
    self._buffer_check_numpy_dtype_setup_cases(pyx_code)
    for specialized_type in pythran_types+specialized_buffer_types:
        final_type = specialized_type
        if specialized_type.is_pythran_expr:
            # Match against the underlying buffer of a Pythran expression.
            specialized_type = specialized_type.org_buffer
        dtype = specialized_type.dtype
        pyx_code.context.update(
            itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
            signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
            dtype=dtype,
            specialized_type_name=final_type.specialization_string)
        # Route the check into the insertion point for the dtype's kind.
        dtypes = [
            (dtype.is_int, pyx_code.dtype_int),
            (dtype.is_float, pyx_code.dtype_float),
            (dtype.is_complex, pyx_code.dtype_complex)
        ]
        for dtype_category, codewriter in dtypes:
            if dtype_category:
                cond = '{{itemsize_match}} and (arg.ndim) == %d' % (
                    specialized_type.ndim,)
                if dtype.is_int:
                    cond += ' and {{signed_match}}'
                if final_type.is_pythran_expr:
                    cond += ' and arg_is_pythran_compatible'
                if codewriter.indenter("if %s:" % cond):
                    #codewriter.putln("print 'buffer match found based on numpy dtype'")
                    codewriter.putln(self.match)
                    codewriter.putln("break")
                    codewriter.dedent()
def _buffer_parse_format_string_check(self, pyx_code, decl_code,
                                      specialized_type, env):
    """
    For each specialized type, try to coerce the object to a memoryview
    slice of that type. This means obtaining a buffer and parsing the
    format string.
    TODO: separate buffer acquisition from format parsing
    """
    dtype = specialized_type.dtype
    if specialized_type.is_buffer:
        axes = [('direct', 'strided')] * specialized_type.ndim
    else:
        axes = specialized_type.axes
    memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes)
    memslice_type.create_from_py_utility_code(env)
    pyx_code.context.update(
        coerce_from_py_func=memslice_type.from_py_function,
        dtype=dtype)
    # Forward-declare the coercion function for the generated code.
    decl_code.putln(
        "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
    pyx_code.context.update(
        specialized_type_name=specialized_type.specialization_string,
        sizeof_dtype=self._sizeof_dtype(dtype))
    pyx_code.put_chunk(
        u"""
            # try {{dtype}}
            if itemsize == -1 or itemsize == {{sizeof_dtype}}:
                memslice = {{coerce_from_py_func}}(arg, 0)
                if memslice.memview:
                    __PYX_XDEC_MEMVIEW(&memslice, 1)
                    # print 'found a match for the buffer through format parsing'
                    %s
                    break
                else:
                    __pyx_PyErr_Clear()
        """ % self.match)
def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
    """
    Generate Cython code to match objects to buffer specializations.
    First try to get a numpy dtype object and match it against the individual
    specializations. If that fails, try naively to coerce the object
    to each specialization, which obtains the buffer each time and tries
    to match the format string.
    """
    # The first thing to find a match in this loop breaks out of the loop
    pyx_code.put_chunk(
        u"""
            """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
            if ndarray is not None:
                if isinstance(arg, ndarray):
                    dtype = arg.dtype
                    """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
                elif __pyx_memoryview_check(arg):
                    arg_base = arg.base
                    if isinstance(arg_base, ndarray):
                        dtype = arg_base.dtype
                    else:
                        dtype = None
                else:
                    dtype = None
                itemsize = -1
                if dtype is not None:
                    itemsize = dtype.itemsize
                    kind = ord(dtype.kind)
                    dtype_signed = kind == 'i'
        """)
    pyx_code.indent(2)
    if pythran_types:
        pyx_code.put_chunk(
            u"""
                # Pythran only supports the endianness of the current compiler
                byteorder = dtype.byteorder
                if byteorder == "<" and not __Pyx_Is_Little_Endian():
                    arg_is_pythran_compatible = False
                elif byteorder == ">" and __Pyx_Is_Little_Endian():
                    arg_is_pythran_compatible = False
                if arg_is_pythran_compatible:
                    cur_stride = itemsize
                    shape = arg.shape
                    strides = arg.strides
                    for i in range(arg.ndim-1, -1, -1):
                        if (strides[i]) != cur_stride:
                            arg_is_pythran_compatible = False
                            break
                        cur_stride *= shape[i]
                    else:
                        arg_is_pythran_compatible = not (arg.flags.f_contiguous and (arg.ndim) > 1)
            """)
    pyx_code.named_insertion_point("numpy_dtype_checks")
    self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
    pyx_code.dedent(2)
    # Fallback: attempt an actual buffer coercion per specialization.
    for specialized_type in buffer_types:
        self._buffer_parse_format_string_check(
            pyx_code, decl_code, specialized_type, env)
def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
    """
    If we have any buffer specializations, write out some variable
    declarations and imports.
    """
    decl_code.put_chunk(
        u"""
            ctypedef struct {{memviewslice_cname}}:
                void *memview
            void __PYX_XDEC_MEMVIEW({{memviewslice_cname}} *, int have_gil)
            bint __pyx_memoryview_check(object)
        """)
    pyx_code.local_variable_declarations.put_chunk(
        u"""
            cdef {{memviewslice_cname}} memslice
            cdef Py_ssize_t itemsize
            cdef bint dtype_signed
            cdef char kind
            itemsize = -1
        """)
    if pythran_types:
        pyx_code.local_variable_declarations.put_chunk(u"""
            cdef bint arg_is_pythran_compatible
            cdef Py_ssize_t cur_stride
        """)
    pyx_code.imports.put_chunk(
        u"""
            cdef type ndarray
            ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
        """)
    seen_typedefs = set()
    seen_int_dtypes = set()
    for buffer_type in all_buffer_types:
        dtype = buffer_type.dtype
        dtype_name = self._dtype_name(dtype)
        if dtype.is_typedef:
            # Declare each typedef'd dtype once under its mangled name.
            if dtype_name not in seen_typedefs:
                seen_typedefs.add(dtype_name)
                decl_code.putln(
                    'ctypedef %s %s "%s"' % (dtype.resolve(), dtype_name,
                                             dtype.empty_declaration_code()))
        if buffer_type.dtype.is_int:
            # Signedness of each int dtype is determined once at runtime.
            if str(dtype) not in seen_int_dtypes:
                seen_int_dtypes.add(str(dtype))
                pyx_code.context.update(dtype_name=dtype_name,
                                        dtype_type=self._dtype_type(dtype))
                pyx_code.local_variable_declarations.put_chunk(
                    u"""
                        cdef bint {{dtype_name}}_is_signed
                        {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
                    """)
def _split_fused_types(self, arg):
    """
    Specialize fused types and split into normal types and buffer types.

    Returns (normal_types, buffer_types, pythran_types, has_object_fallback).
    """
    specialized_types = PyrexTypes.get_specialized_types(arg.type)
    # Prefer long over int, etc by sorting (see type classes in PyrexTypes.py)
    specialized_types.sort()
    seen_py_type_names = set()
    normal_types, buffer_types, pythran_types = [], [], []
    has_object_fallback = False
    for specialized_type in specialized_types:
        py_type_name = specialized_type.py_type_name()
        if py_type_name:
            # Several C types can map to one Python type; keep the first.
            if py_type_name in seen_py_type_names:
                continue
            seen_py_type_names.add(py_type_name)
            if py_type_name == 'object':
                has_object_fallback = True
            else:
                normal_types.append(specialized_type)
        elif specialized_type.is_pythran_expr:
            pythran_types.append(specialized_type)
        elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
            buffer_types.append(specialized_type)
    return normal_types, buffer_types, pythran_types, has_object_fallback
def _unpack_argument(self, pyx_code):
    """Generate dispatcher code that fetches one argument: positionally,
    by keyword, from the defaults tuple, or raise a TypeError.

    Bug fix: the missing-keyword-only-argument message used to
    interpolate {{arg.default}} (always None in that template branch,
    yielding "Missing keyword-only argument: 'None'"); it now reports
    the argument's name via {{arg.name}}.
    """
    pyx_code.put_chunk(
        u"""
            # PROCESSING ARGUMENT {{arg_tuple_idx}}
            if {{arg_tuple_idx}} < len(args):
                arg = (args)[{{arg_tuple_idx}}]
            elif kwargs is not None and '{{arg.name}}' in kwargs:
                arg = (kwargs)['{{arg.name}}']
            else:
            {{if arg.default}}
                arg = (defaults)[{{default_idx}}]
            {{else}}
                {{if arg_tuple_idx < min_positional_args}}
                raise TypeError("Expected at least %d argument%s, got %d" % (
                    {{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(args)))
                {{else}}
                raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.name}}")
                {{endif}}
            {{endif}}
        """)
def _fused_signature_index(self, pyx_code):
"""
Generate Cython code for constructing a persistent nested dictionary index of
fused type specialization signatures.
"""
pyx_code.put_chunk(
u"""
if not _fused_sigindex:
for sig in signatures:
sigindex_node = _fused_sigindex
*sig_series, last_type = sig.strip('()').split('|')
for sig_type in sig_series:
if sig_type not in sigindex_node:
sigindex_node[sig_type] = sigindex_node = {}
else:
sigindex_node = sigindex_node[sig_type]
sigindex_node[last_type] = sig
"""
)
def make_fused_cpdef(self, orig_py_func, env, is_def):
    """
    This creates the function that is indexable from Python and does
    runtime dispatch based on the argument types. The function gets the
    arg tuple and kwargs dict (or None) and the defaults tuple
    as arguments from the Binding Fused Function's tp_call.
    """
    from . import TreeFragment, Code, UtilityCode

    # One dispatch slot in dest_sig per distinct fused base type used
    # by the arguments.
    fused_types = self._get_fused_base_types([
        arg.type for arg in self.node.args if arg.type.is_fused])

    context = {
        'memviewslice_cname': MemoryView.memviewslice_cname,
        'func_args': self.node.args,
        'n_fused': len(fused_types),
        'min_positional_args':
            self.node.num_required_args - self.node.num_required_kw_args
            if is_def else
            sum(1 for arg in self.node.args if arg.default is None),
        'name': orig_py_func.entry.name,
    }

    # Two writers: one for the dispatcher body, one for extern declarations.
    pyx_code = Code.PyxCodeWriter(context=context)
    decl_code = Code.PyxCodeWriter(context=context)
    decl_code.put_chunk(
        u"""
            cdef extern from *:
                void __pyx_PyErr_Clear "PyErr_Clear" ()
                type __Pyx_ImportNumPyArrayTypeIfAvailable()
                int __Pyx_Is_Little_Endian()
        """)
    decl_code.indent()

    pyx_code.put_chunk(
        u"""
            def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}):
                # FIXME: use a typed signature - currently fails badly because
                # default arguments inherit the types we specify here!

                cdef list search_list
                cdef dict sn, sigindex_node

                dest_sig = [None] * {{n_fused}}

                if kwargs is not None and not kwargs:
                    kwargs = None

                cdef Py_ssize_t i

                # instance check body
        """)

    pyx_code.indent() # indent following code to function body
    pyx_code.named_insertion_point("imports")
    pyx_code.named_insertion_point("func_defs")
    pyx_code.named_insertion_point("local_variable_declarations")

    fused_index = 0
    default_idx = 0
    all_buffer_types = OrderedSet()
    seen_fused_types = set()
    for i, arg in enumerate(self.node.args):
        if arg.type.is_fused:
            arg_fused_types = arg.type.get_fused_types()
            if len(arg_fused_types) > 1:
                raise NotImplementedError("Determination of more than one fused base "
                                          "type per argument is not implemented.")
            fused_type = arg_fused_types[0]

        if arg.type.is_fused and fused_type not in seen_fused_types:
            # Only the first argument of each fused base type generates
            # the runtime type check; later args of the same type reuse it.
            seen_fused_types.add(fused_type)

            context.update(
                arg_tuple_idx=i,
                arg=arg,
                dest_sig_idx=fused_index,
                default_idx=default_idx,
            )

            normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
            self._unpack_argument(pyx_code)

            # 'unrolled' loop, first match breaks out of it
            if pyx_code.indenter("while 1:"):
                if normal_types:
                    self._fused_instance_checks(normal_types, pyx_code, env)
                if buffer_types or pythran_types:
                    env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
                    self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
                if has_object_fallback:
                    pyx_code.context.update(specialized_type_name='object')
                    pyx_code.putln(self.match)
                else:
                    pyx_code.putln(self.no_match)
                pyx_code.putln("break")
                pyx_code.dedent()

            fused_index += 1
            all_buffer_types.update(buffer_types)
            all_buffer_types.update(ty.org_buffer for ty in pythran_types)

        if arg.default:
            # defaults tuple index advances for every defaulted arg,
            # fused or not.
            default_idx += 1

    if all_buffer_types:
        self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
        env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
        env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))

    self._fused_signature_index(pyx_code)

    # Walk dest_sig through the nested signature index built above to
    # find the unique matching specialization.
    pyx_code.put_chunk(
        u"""
            sigindex_matches = []
            sigindex_candidates = [_fused_sigindex]

            for dst_type in dest_sig:
                found_matches = []
                found_candidates = []
                # Make two seperate lists: One for signature sub-trees
                # with at least one definite match, and another for
                # signature sub-trees with only ambiguous matches
                # (where `dest_sig[i] is None`).
                if dst_type is None:
                    for sn in sigindex_matches:
                        found_matches.extend(sn.values())
                    for sn in sigindex_candidates:
                        found_candidates.extend(sn.values())
                else:
                    for search_list in (sigindex_matches, sigindex_candidates):
                        for sn in search_list:
                            if dst_type in sn:
                                found_matches.append(sn[dst_type])
                sigindex_matches = found_matches
                sigindex_candidates = found_candidates
                if not (found_matches or found_candidates):
                    break

            candidates = sigindex_matches

            if not candidates:
                raise TypeError("No matching signature found")
            elif len(candidates) > 1:
                raise TypeError("Function call with ambiguous argument types")
            else:
                return (signatures)[candidates[0]]
        """)

    fragment_code = pyx_code.getvalue()
    # print decl_code.getvalue()
    # print fragment_code
    from .Optimize import ConstantFolding
    fragment = TreeFragment.TreeFragment(
        fragment_code, level='module', pipeline=[ConstantFolding()])
    ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root)
    UtilityCode.declare_declarations_in_scope(
        decl_code.getvalue(), env.global_scope())
    ast.scope = env
    # FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self'
    ast.analyse_declarations(env)
    py_func = ast.stats[-1]  # the DefNode
    self.fragment_scope = ast.scope

    if isinstance(self.node, DefNode):
        py_func.specialized_cpdefs = self.nodes[:]
    else:
        py_func.specialized_cpdefs = [n.py_func for n in self.nodes]

    return py_func
def update_fused_defnode_entry(self, env):
    # Make the generated dispatcher (__pyx_fused_cpdef) take over the
    # identity of the original Python function: copy its naming/doc
    # attributes, rebind the scope entries, and link the specializations
    # back to the dispatcher.
    copy_attributes = (
        'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname',
        'pymethdef_cname', 'doc', 'doc_cname', 'is_member',
        'scope'
    )

    entry = self.py_func.entry

    for attr in copy_attributes:
        setattr(entry, attr,
                getattr(self.orig_py_func.entry, attr))

    self.py_func.name = self.orig_py_func.name
    self.py_func.doc = self.orig_py_func.doc

    # The helper name must not leak into the module namespace.
    env.entries.pop('__pyx_fused_cpdef', None)
    if isinstance(self.node, DefNode):
        env.entries[entry.name] = entry
    else:
        env.entries[entry.name].as_variable = entry

    env.pyfunc_entries.append(entry)

    self.py_func.entry.fused_cfunction = self
    for node in self.nodes:
        if isinstance(self.node, DefNode):
            node.fused_py_func = self.py_func
        else:
            node.py_func.fused_py_func = self.py_func
            node.entry.as_variable = entry

    self.synthesize_defnodes()
    self.stats.append(self.__signatures__)
def analyse_expressions(self, env):
    """
    Analyse the expressions. Take care to only evaluate default arguments
    once and clone the result for all specializations
    """
    for fused_compound_type in self.fused_compound_types:
        for fused_type in fused_compound_type.get_fused_types():
            for specialization_type in fused_type.types:
                if specialization_type.is_complex:
                    specialization_type.create_declaration_utility_code(env)

    if self.py_func:
        self.__signatures__ = self.__signatures__.analyse_expressions(env)
        self.py_func = self.py_func.analyse_expressions(env)
        self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env)
        self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env)

    # Defaults are analysed once here; specializations get CloneNodes.
    self.defaults = defaults = []

    for arg in self.node.args:
        if arg.default:
            arg.default = arg.default.analyse_expressions(env)
            # ProxyNode lets the clones track later replacements of the node.
            defaults.append(ProxyNode(arg.default))
        else:
            defaults.append(None)

    for i, stat in enumerate(self.stats):
        stat = self.stats[i] = stat.analyse_expressions(env)
        if isinstance(stat, FuncDefNode) and stat is not self.py_func:
            # the dispatcher specifically doesn't want its defaults overriding
            for arg, default in zip(stat.args, defaults):
                if default is not None:
                    arg.default = CloneNode(default).coerce_to(arg.type, env)

    if self.py_func:
        args = [CloneNode(default) for default in defaults if default]
        self.defaults_tuple = TupleNode(self.pos, args=args)
        self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
        self.defaults_tuple = ProxyNode(self.defaults_tuple)
        self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object)

        fused_func = self.resulting_fused_function.arg
        fused_func.defaults_tuple = CloneNode(self.defaults_tuple)
        fused_func.code_object = CloneNode(self.code_object)

        for i, pycfunc in enumerate(self.specialized_pycfuncs):
            pycfunc.code_object = CloneNode(self.code_object)
            pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env)
            pycfunc.defaults_tuple = CloneNode(self.defaults_tuple)
    return self
def synthesize_defnodes(self):
    """
    Create the __signatures__ dict of PyCFunctionNode specializations.
    """
    # For cdef/cpdef functions, the Python-visible wrapper is py_func.
    if isinstance(self.nodes[0], CFuncDefNode):
        nodes = [node.py_func for node in self.nodes]
    else:
        nodes = self.nodes

    # For the moment, fused functions do not support METH_FASTCALL
    for node in nodes:
        node.entry.signature.use_fastcall = False

    signatures = [StringEncoding.EncodedString(node.specialized_signature_string)
                  for node in nodes]
    # signature string -> bound PyCFunction specialization
    keys = [ExprNodes.StringNode(node.pos, value=sig)
            for node, sig in zip(nodes, signatures)]
    values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True)
              for node in nodes]

    self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values))

    self.specialized_pycfuncs = values
    for pycfuncnode in values:
        pycfuncnode.is_specialization = True
def generate_function_definitions(self, env, code):
    """Emit C definitions for the dispatcher and all used specializations."""
    if self.py_func:
        self.py_func.pymethdef_required = True
        self.fused_func_assignment.generate_function_definitions(env, code)

    for stat in self.stats:
        # Only function definitions whose entries are actually used
        # produce code.
        if not isinstance(stat, FuncDefNode):
            continue
        if not stat.entry.used:
            continue
        code.mark_pos(stat.pos)
        stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
    # Note: all def function specialization are wrapped in PyCFunction
    # nodes in the self.__signatures__ dictnode.
    for default in self.defaults:
        if default is not None:
            default.generate_evaluation_code(code)

    if self.py_func:
        self.defaults_tuple.generate_evaluation_code(code)
        self.code_object.generate_evaluation_code(code)

    for stat in self.stats:
        code.mark_pos(stat.pos)
        if isinstance(stat, ExprNodes.ExprNode):
            stat.generate_evaluation_code(code)
        else:
            stat.generate_execution_code(code)

    if self.__signatures__:
        self.resulting_fused_function.generate_evaluation_code(code)

        # Attach the signatures dict directly to the fused function object.
        code.putln(
            "((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" %
            (self.resulting_fused_function.result(),
             self.__signatures__.result()))
        self.__signatures__.generate_giveref(code)
        self.__signatures__.generate_post_assignment_code(code)
        self.__signatures__.free_temps(code)

        self.fused_func_assignment.generate_execution_code(code)

        # Dispose of results
        self.resulting_fused_function.generate_disposal_code(code)
        self.resulting_fused_function.free_temps(code)
        self.defaults_tuple.generate_disposal_code(code)
        self.defaults_tuple.free_temps(code)
        self.code_object.generate_disposal_code(code)
        self.code_object.free_temps(code)

        for default in self.defaults:
            if default is not None:
                default.generate_disposal_code(code)
                default.free_temps(code)
def annotate(self, code):
    """Forward annotation to every child statement."""
    for child in self.stats:
        child.annotate(code)
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Future.py 0000664 0000000 0000000 00000001165 13766135517 0030120 0 ustar 00root root 0000000 0000000 def _get_feature(name):
import __future__
# fall back to a unique fake object for earlier Python versions or Python 3
return getattr(__future__, name, object())
# __future__ feature objects, resolved once at import time.  Features that
# the running interpreter does not provide fall back to a unique dummy
# object (see _get_feature above), so identity comparisons still work.
unicode_literals = _get_feature("unicode_literals")
with_statement = _get_feature("with_statement")  # dummy
division = _get_feature("division")
print_function = _get_feature("print_function")
absolute_import = _get_feature("absolute_import")
nested_scopes = _get_feature("nested_scopes")  # dummy
generators = _get_feature("generators")  # dummy
generator_stop = _get_feature("generator_stop")
annotations = _get_feature("annotations")

# Keep the helper out of the module namespace.
del _get_feature
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Interpreter.py 0000664 0000000 0000000 00000004102 13766135517 0031143 0 ustar 00root root 0000000 0000000 """
This module deals with interpreting the parse tree as Python
would have done, in the compiler.
For now this only covers parse tree to value conversion of
compile-time values.
"""
from __future__ import absolute_import
from .Nodes import *
from .ExprNodes import *
from .Errors import CompileError
class EmptyScope(object):
    """A scope that contains no names: every lookup misses."""

    def lookup(self, name):
        # Nothing is ever defined here, so any name resolves to None.
        return None


# Shared singleton used for compile-time evaluation without a real scope.
empty_scope = EmptyScope()
def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
    """
    Tries to interpret a list of compile time option nodes.
    The result will be a tuple (optlist, optdict) but where
    all expression nodes have been interpreted. The result is
    in the form of tuples (value, pos).

    optlist is a list of nodes, while optdict is a DictNode (the
    result optdict is a dict)

    If type_env is set, all type nodes will be analysed and the resulting
    type set. Otherwise only interpretateable ExprNodes
    are allowed, other nodes raises errors.

    A CompileError will be raised if there are problems.
    """
    def interpret(node, ix):
        # Positions listed in type_args are interpreted as types, the
        # rest as compile-time constant expressions.
        if ix in type_args:
            if type_env:
                type = node.analyse_as_type(type_env)
                if not type:
                    raise CompileError(node.pos, "Invalid type.")
                return (type, node.pos)
            else:
                raise CompileError(node.pos, "Type not allowed here.")
        else:
            if (sys.version_info[0] >= 3 and
                    isinstance(node, StringNode) and
                    node.unicode_value is not None):
                return (node.unicode_value, node.pos)
            return (node.compile_time_value(empty_scope), node.pos)

    if optlist:
        optlist = [interpret(x, ix) for ix, x in enumerate(optlist)]
    if optdict:
        assert isinstance(optdict, DictNode)
        new_optdict = {}
        for item in optdict.key_value_pairs:
            new_key, dummy = interpret(item.key, None)
            new_optdict[new_key] = interpret(item.value, item.key.value)
        optdict = new_optdict
    # Bug fix: this used to return 'new_optdict', which is unbound (and
    # raised NameError) whenever 'optdict' was empty or None.  Returning
    # 'optdict' is identical in the non-empty case and correct otherwise.
    return (optlist, optdict)
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Lexicon.py 0000664 0000000 0000000 00000027544 13766135517 0030260 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# cython: language_level=3, py2_import=True
#
# Cython Scanner - Lexical Definitions
#
from __future__ import absolute_import, unicode_literals
# Prefix letters accepted on string/char literals (both letter cases).
raw_prefixes = "rR"
bytes_prefixes = "bB"
string_prefixes = "fFuU" + bytes_prefixes
char_prefixes = "cC"
# Any single prefix character that may start a literal.
any_string_prefix = raw_prefixes + string_prefixes + char_prefixes

# Token-name constant for identifier tokens.
IDENT = 'IDENT'
def make_lexicon():
    """Build and return the Plex Lexicon describing Cython's token grammar."""
    from ..Plex import \
        Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
        TEXT, IGNORE, Method, State, Lexicon, Range

    nonzero_digit = Any("123456789")
    digit = Any("0123456789")
    bindigit = Any("01")
    octdigit = Any("01234567")
    hexdigit = Any("0123456789ABCDEFabcdef")
    indentation = Bol + Rep(Any(" \t"))

    # The list of valid unicode identifier characters are pretty slow to generate at runtime,
    # and require Python3, so are just included directly here
    # (via the generated code block at the bottom of the file)
    unicode_start_character = (Any(unicode_start_ch_any) | Range(unicode_start_ch_range))
    unicode_continuation_character = (
        unicode_start_character |
        Any(unicode_continuation_ch_any) | Range(unicode_continuation_ch_range))

    def underscore_digits(d):
        # Digit groups optionally separated by single underscores (PEP 515).
        return Rep1(d) + Rep(Str("_") + Rep1(d))

    def prefixed_digits(prefix, digits):
        return prefix + Opt(Str("_")) + underscore_digits(digits)

    decimal = underscore_digits(digit)
    dot = Str(".")
    exponent = Any("Ee") + Opt(Any("+-")) + decimal
    decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)

    #name = letter + Rep(letter | digit)
    name = unicode_start_character + Rep(unicode_continuation_character)
    intconst = (prefixed_digits(nonzero_digit, digit) |  # decimal literals with underscores must not start with '0'
                (Str("0") + (prefixed_digits(Any("Xx"), hexdigit) |
                             prefixed_digits(Any("Oo"), octdigit) |
                             prefixed_digits(Any("Bb"), bindigit) )) |
                underscore_digits(Str('0'))  # 0_0_0_0... is allowed as a decimal literal
                | Rep1(digit)  # FIXME: remove these Py2 style decimal/octal literals (PY_VERSION_HEX < 3)
                )
    # C-style integer suffixes (unsigned/long), in either order.
    intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu")))
    intliteral = intconst + intsuffix
    fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
    imagconst = (intconst | fltconst) + Any("jJ")

    # invalid combinations of prefixes are caught in p_string_literal
    beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) |
                      Any(char_prefixes)
                      ) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
    two_oct = octdigit + octdigit
    three_oct = octdigit + octdigit + octdigit
    two_hex = hexdigit + hexdigit
    four_hex = two_hex + two_hex
    escapeseq = Str("\\") + (two_oct | three_oct |
                             Str('N{') + Rep(AnyBut('}')) + Str('}') |
                             Str('u') + four_hex | Str('x') + two_hex |
                             Str('U') + four_hex + four_hex | AnyChar)

    bra = Any("([{")
    ket = Any(")]}")
    # Single- and multi-character operators/punctuation.
    punct = Any(":,;+-*/|&<>=.%`~^?!@")
    diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//",
                    "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
                    "<<=", ">>=", "**=", "//=", "->", "@=")
    spaces = Rep1(Any(" \t\f"))
    escaped_newline = Str("\\\n")
    lineterm = Eol + Opt(Str("\n"))

    comment = Str("#") + Rep(AnyBut("\n"))

    # Main lexicon plus one Plex State per string-literal flavour and one
    # for indentation handling at the start of a line.
    return Lexicon([
        (name, Method('normalize_ident')),
        (intliteral, Method('strip_underscores', symbol='INT')),
        (fltconst, Method('strip_underscores', symbol='FLOAT')),
        (imagconst, Method('strip_underscores', symbol='IMAG')),
        (punct | diphthong, TEXT),

        (bra, Method('open_bracket_action')),
        (ket, Method('close_bracket_action')),
        (lineterm, Method('newline_action')),

        (beginstring, Method('begin_string_action')),

        (comment, IGNORE),
        (spaces, IGNORE),
        (escaped_newline, IGNORE),

        State('INDENT', [
            (comment + lineterm, Method('commentline')),
            (Opt(spaces) + Opt(comment) + lineterm, IGNORE),
            (indentation, Method('indentation_action')),
            (Eof, Method('eof_action'))
        ]),

        State('SQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
            (Str('"'), 'CHARS'),
            (Str("\n"), Method('unclosed_string_action')),
            (Str("'"), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        State('DQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut('"\n\\')), 'CHARS'),
            (Str("'"), 'CHARS'),
            (Str("\n"), Method('unclosed_string_action')),
            (Str('"'), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        State('TSQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
            (Any("'\""), 'CHARS'),
            (Str("\n"), 'NEWLINE'),
            (Str("'''"), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        State('TDQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut('"\'\n\\')), 'CHARS'),
            (Any("'\""), 'CHARS'),
            (Str("\n"), 'NEWLINE'),
            (Str('"""'), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        (Eof, Method('eof_action'))
        ],

        # FIXME: Plex 1.9 needs different args here from Plex 1.1.4
        #debug_flags = scanner_debug_flags,
        #debug_file = scanner_dump_file
        )
# BEGIN GENERATED CODE
# generated with:
# cpython 3.10.0a0 (heads/master:2b0e654f91, May 29 2020, 16:17:52)
unicode_start_ch_any = (
u"_ªµºˬˮͿΆΌՙەۿܐޱߺࠚࠤࠨऽॐলঽৎৼਫ਼ઽૐૹଽୱஃஜௐఽಀಽೞഽൎලาຄລາຽໆༀဿၡႎჇჍቘዀៗៜᢪᪧᳺὙ"
u"ὛὝιⁱⁿℂℇℕℤΩℨⅎⴧⴭⵯꣻꧏꩺꪱꫀꫂיִמּﹱﹳﹷﹹﹻﹽ𐠈𐠼𐨀𐼧𑅄𑅇𑅶𑇚𑇜𑊈𑌽𑍐𑓇𑙄𑚸𑤉𑤿𑥁𑧡𑧣𑨀𑨺𑩐𑪝𑱀𑵆𑶘𑾰𖽐𖿣𝒢"
u"𝒻𝕆𞅎𞥋𞸤𞸧𞸹𞸻𞹂𞹇𞹉𞹋𞹔𞹗𞹙𞹛𞹝𞹟𞹤𞹾"
)
unicode_start_ch_range = (
u"AZazÀÖØöøˁˆˑˠˤͰʹͶͷͻͽΈΊΎΡΣϵϷҁҊԯԱՖՠֈאתׯײؠيٮٯٱۓۥۦۮۯۺۼܒܯݍޥߊߪߴߵࠀࠕ"
u"ࡀࡘࡠࡪࢠࢴࢶࣇऄहक़ॡॱঀঅঌএঐওনপরশহড়ঢ়য়ৡৰৱਅਊਏਐਓਨਪਰਲਲ਼ਵਸ਼ਸਹਖ਼ੜੲੴઅઍએઑઓનપરલળવહ"
u"ૠૡଅଌଏଐଓନପରଲଳଵହଡ଼ଢ଼ୟୡஅஊஎஐஒகஙசஞடணதநபமஹఅఌఎఐఒనపహౘౚౠౡಅಌಎಐಒನಪಳವಹೠೡೱೲ"
u"ഄഌഎഐഒഺൔൖൟൡൺൿඅඖකනඳරවෆกะเๆກຂຆຊຌຣວະເໄໜໟཀཇཉཬྈྌကဪၐၕၚၝၥၦၮၰၵႁႠჅაჺჼቈ"
u"ቊቍቐቖቚቝበኈኊኍነኰኲኵኸኾዂዅወዖዘጐጒጕጘፚᎀᎏᎠᏵᏸᏽᐁᙬᙯᙿᚁᚚᚠᛪᛮᛸᜀᜌᜎᜑᜠᜱᝀᝑᝠᝬᝮᝰកឳᠠᡸᢀᢨ"
u"ᢰᣵᤀᤞᥐᥭᥰᥴᦀᦫᦰᧉᨀᨖᨠᩔᬅᬳᭅᭋᮃᮠᮮᮯᮺᯥᰀᰣᱍᱏᱚᱽᲀᲈᲐᲺᲽᲿᳩᳬᳮᳳᳵᳶᴀᶿḀἕἘἝἠὅὈὍὐὗὟώᾀᾴ"
u"ᾶᾼῂῄῆῌῐΐῖΊῠῬῲῴῶῼₐₜℊℓ℘ℝKℹℼℿⅅⅉⅠↈⰀⰮⰰⱞⱠⳤⳫⳮⳲⳳⴀⴥⴰⵧⶀⶖⶠⶦⶨⶮⶰⶶⶸⶾⷀⷆⷈⷎⷐⷖ"
u"ⷘⷞ々〇〡〩〱〵〸〼ぁゖゝゟァヺーヿㄅㄯㄱㆎㆠㆿㇰㇿ㐀䶿一鿼ꀀꒌꓐꓽꔀꘌꘐꘟꘪꘫꙀꙮꙿꚝꚠꛯꜗꜟꜢꞈꞋꞿꟂꟊꟵꠁꠃꠅꠇꠊ"
u"ꠌꠢꡀꡳꢂꢳꣲꣷꣽꣾꤊꤥꤰꥆꥠꥼꦄꦲꧠꧤꧦꧯꧺꧾꨀꨨꩀꩂꩄꩋꩠꩶꩾꪯꪵꪶꪹꪽꫛꫝꫠꫪꫲꫴꬁꬆꬉꬎꬑꬖꬠꬦꬨꬮꬰꭚꭜꭩꭰꯢ"
u"가힣ힰퟆퟋퟻ豈舘並龎ffstﬓﬗײַﬨשׁזּטּלּנּסּףּפּצּﮱﯓﱝﱤﴽﵐﶏﶒﷇﷰﷹﹿﻼAZazヲンᅠ하ᅦᅧᅬᅭᅲᅳᅵ𐀀𐀋𐀍𐀦𐀨𐀺"
u"𐀼𐀽𐀿𐁍𐁐𐁝𐂀𐃺𐅀𐅴𐊀𐊜𐊠𐋐𐌀𐌟𐌭𐍊𐍐𐍵𐎀𐎝𐎠𐏃𐏈𐏏𐏑𐏕𐐀𐒝𐒰𐓓𐓘𐓻𐔀𐔧𐔰𐕣𐘀𐜶𐝀𐝕𐝠𐝧𐠀𐠅𐠊𐠵𐠷𐠸𐠿𐡕𐡠𐡶𐢀𐢞𐣠𐣲𐣴𐣵"
u"𐤀𐤕𐤠𐤹𐦀𐦷𐦾𐦿𐨐𐨓𐨕𐨗𐨙𐨵𐩠𐩼𐪀𐪜𐫀𐫇𐫉𐫤𐬀𐬵𐭀𐭕𐭠𐭲𐮀𐮑𐰀𐱈𐲀𐲲𐳀𐳲𐴀𐴣𐺀𐺩𐺰𐺱𐼀𐼜𐼰𐽅𐾰𐿄𐿠𐿶𑀃𑀷𑂃𑂯𑃐𑃨𑄃𑄦𑅐𑅲"
u"𑆃𑆲𑇁𑇄𑈀𑈑𑈓𑈫𑊀𑊆𑊊𑊍𑊏𑊝𑊟𑊨𑊰𑋞𑌅𑌌𑌏𑌐𑌓𑌨𑌪𑌰𑌲𑌳𑌵𑌹𑍝𑍡𑐀𑐴𑑇𑑊𑑟𑑡𑒀𑒯𑓄𑓅𑖀𑖮𑗘𑗛𑘀𑘯𑚀𑚪𑜀𑜚𑠀𑠫𑢠𑣟𑣿𑤆𑤌𑤓"
u"𑤕𑤖𑤘𑤯𑦠𑦧𑦪𑧐𑨋𑨲𑩜𑪉𑫀𑫸𑰀𑰈𑰊𑰮𑱲𑲏𑴀𑴆𑴈𑴉𑴋𑴰𑵠𑵥𑵧𑵨𑵪𑶉𑻠𑻲𒀀𒎙𒐀𒑮𒒀𒕃𓀀𓐮𔐀𔙆𖠀𖨸𖩀𖩞𖫐𖫭𖬀𖬯𖭀𖭃𖭣𖭷𖭽𖮏𖹀𖹿"
u"𖼀𖽊𖾓𖾟𖿠𖿡𗀀𘟷𘠀𘳕𘴀𘴈𛀀𛄞𛅐𛅒𛅤𛅧𛅰𛋻𛰀𛱪𛱰𛱼𛲀𛲈𛲐𛲙𝐀𝑔𝑖𝒜𝒞𝒟𝒥𝒦𝒩𝒬𝒮𝒹𝒽𝓃𝓅𝔅𝔇𝔊𝔍𝔔𝔖𝔜𝔞𝔹𝔻𝔾𝕀𝕄𝕊𝕐𝕒𝚥"
u"𝚨𝛀𝛂𝛚𝛜𝛺𝛼𝜔𝜖𝜴𝜶𝝎𝝐𝝮𝝰𝞈𝞊𝞨𝞪𝟂𝟄𝟋𞄀𞄬𞄷𞄽𞋀𞋫𞠀𞣄𞤀𞥃𞸀𞸃𞸅𞸟𞸡𞸢𞸩𞸲𞸴𞸷𞹍𞹏𞹑𞹒𞹡𞹢𞹧𞹪𞹬𞹲𞹴𞹷𞹹𞹼𞺀𞺉𞺋𞺛"
u"𞺡𞺣𞺥𞺩𞺫𞺻𠀀𪛝𪜀𫜴𫝀𫠝𫠠𬺡𬺰𮯠丽𪘀"
)
unicode_continuation_ch_any = (
u"··়ׇֿٰܑ߽ৗ਼৾ੑੵ઼଼ஂௗ಼ൗ්ූัັ༹༵༷࿆᳭ᢩ៝᳴⁔⵿⃡꙯ꠂ꠆ꠋ꠬ꧥꩃﬞꪰ꫁_𑅳𐨿𐇽𐋠𑈾𑍗𑑞𑥀𑧤𑩇𑴺𑵇𖽏𖿤𝩵"
u"𝪄"
)
unicode_continuation_ch_range = (
u"09ֽׁׂًؚ֑ׅ̀ͯ҃҇ׄؐ٩۪ۭۖۜ۟ۤۧۨ۰۹ܰ݊ަް߀߉࡙࡛࣓ࣣ߫߳ࠖ࠙ࠛࠣࠥࠧࠩ࠭࣡ःऺ़ाॏ॑ॗॢॣ०९ঁঃ"
u"াৄেৈো্ৢৣ০৯ਁਃਾੂੇੈੋ੍੦ੱઁઃાૅેૉો્ૢૣ૦૯ૺ૿ଁଃାୄେୈୋ୍୕ୗୢୣ୦୯ாூெைொ்௦௯ఀఄాౄ"
u"ెైొ్ౕౖౢౣ౦౯ಁಃಾೄೆೈೊ್ೕೖೢೣ೦೯ഀഃ഻഼ാൄെൈൊ്ൢൣ൦൯ඁඃාුෘෟ෦෯ෲෳำฺ็๎๐๙ຳຼ່ໍ໐໙"
u"༘༙༠༩༾༿྄ཱ྆྇ྍྗྙྼါှ၀၉ၖၙၞၠၢၤၧၭၱၴႂႍႏႝ፝፟፩፱ᜒ᜔ᜲ᜴ᝒᝓᝲᝳ឴៓០៩᠋᠍᠐᠙ᤠᤫᤰ᤻᥆᥏᧐᧚"
u"ᨗᨛᩕᩞ᩠᩿᩼᪉᪐᪙᪽ᪿᫀ᪰ᬀᬄ᬴᭄᭐᭙᭫᭳ᮀᮂᮡᮭ᮰᮹᯦᯳ᰤ᰷᱀᱉᱐᱙᳔᳨᳐᳒᳷᷹᷿᳹᷀᷻‿⁀⃥゙゚〪〯⃐⃜⃰⳯⳱ⷠⷿ"
u"꘠꘩ꙴ꙽ꚞꚟ꛰꛱ꠣꠧꢀꢁꢴꣅ꣐꣙꣠꣱ꣿ꤉ꤦ꤭ꥇ꥓ꦀꦃ꦳꧀꧐꧙꧰꧹ꨩꨶꩌꩍ꩐꩙ꩻꩽꪴꪲꪷꪸꪾ꪿ꫫꫯꫵ꫶ꯣꯪ꯬꯭꯰꯹︀️︠︯"
u"︳︴﹍﹏09゙゚𐍶𐍺𐒠𐒩𐨁𐨃𐨅𐨆𐨌𐨺𐫦𐨏𐨸𐫥𐴤𐴧𐴰𐴹𐽆𐽐𐺫𐺬𑀀𑀂𑀸𑁆𑁦𑁯𑁿𑂂𑂰𑂺𑃰𑃹𑄀𑄂𑄧𑄴𑄶𑄿𑅅𑅆𑆀𑆂𑆳𑇀𑇉𑇌𑇎𑇙𑈬𑈷"
u"𑋟𑋪𑋰𑋹𑌀𑌃𑌻𑌼𑌾𑍄𑍇𑍈𑍋𑍍𑍢𑍣𑍦𑍬𑍰𑍴𑐵𑑆𑑐𑑙𑒰𑓃𑓐𑓙𑖯𑖵𑖸𑗀𑗜𑗝𑘰𑙀𑙐𑙙𑚫𑚷𑛀𑛉𑜝𑜫𑜰𑜹𑠬𑠺𑣠𑣩𑤰𑤵𑤷𑤸𑤻𑤾𑥂𑥃𑥐𑥙"
u"𑧑𑧗𑧚𑧠𑨁𑨊𑨳𑨹𑨻𑨾𑩑𑩛𑪊𑪙𑰯𑰶𑰸𑰿𑱐𑱙𑲒𑲧𑲩𑲶𑴱𑴶𑴼𑴽𑴿𑵅𑵐𑵙𑶊𑶎𑶐𑶑𑶓𑶗𑶠𑶩𑻳𑻶𖩠𖩩𖫰𖫴𖬰𖬶𖭐𖭙𖽑𖾇𖾏𖾒𖿰𖿱𛲝𛲞𝅩𝅥"
u"𝅲𝅻𝆂𝆋𝅭𝆅𝆪𝆭𝉂𝉄𝟎𝟿𝨀𝨶𝨻𝩬𝪛𝪟𝪡𝪯𞀀𞀆𞀈𞀘𞀛𞀡𞀣𞀤𞀦𞀪𞄰𞄶𞅀𞅉𞋬𞋹𞥊𞣐𞣖𞥄𞥐𞥙🯰🯹"
)
# END GENERATED CODE
cython-5ea4b23c2e24e04c9fa48ec816c0fc3483fea21b-Cython-Compiler/Cython/Compiler/Main.py 0000664 0000000 0000000 00000070261 13766135517 0027535 0 ustar 00root root 0000000 0000000 #
# Cython Top Level
#
from __future__ import absolute_import
import os
import re
import sys
import io
# Refuse to run on interpreter versions Cython no longer supports.
if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 3):
    sys.stderr.write("Sorry, Cython requires Python 2.7 or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2]))
    sys.exit(1)

# Py2/Py3 compatibility shim: 'basestring' only exists on Python 2.
try:
    from __builtin__ import basestring
except ImportError:
    basestring = str
# Do not import Parsing here, import it when needed, because Parsing imports
# Nodes, which globally needs debug command line options initialized to set a
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
from . import Errors
from .StringEncoding import EncodedString
from .Scanning import PyrexScanner, FileSourceDescriptor
from .Errors import PyrexError, CompileError, error, warning
from .Symtab import ModuleScope
from .. import Utils
from . import Options
from .Options import CompilationOptions, default_options
from .CmdLine import parse_command_line
from .Lexicon import (unicode_start_ch_any, unicode_continuation_ch_any,
unicode_start_ch_range, unicode_continuation_ch_range)
def _make_range_re(chrs):
out = []
for i in range(0, len(chrs), 2):
out.append(u"{0}-{1}".format(chrs[i], chrs[i+1]))
return u"".join(out)
# py2 version looked like r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$"
# Unicode identifier pattern built from the generated tables in Lexicon.py:
# start char followed by continuation chars.
module_name_pattern = u"[{0}{1}][{0}{2}{1}{3}]*".format(
    unicode_start_ch_any, _make_range_re(unicode_start_ch_range),
    unicode_continuation_ch_any,
    _make_range_re(unicode_continuation_ch_range))
# Full dotted module name: identifier(.identifier)*
module_name_pattern = re.compile(u"{0}(\\.{0})*$".format(module_name_pattern))

# The bundled Cython/Includes directory, always appended to the search path.
standard_include_path = os.path.abspath(
    os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Includes'))
class Context(object):
    # This class encapsulates the context needed for compiling
    # one or more Cython implementation files along with their
    # associated and imported declaration files. It includes
    # the root of the module import namespace and the list
    # of directories to search for include files.
    #
    # modules {string : ModuleScope}
    # include_directories [string]
    # future_directives [object]
    # language_level int currently 2 or 3 for Python 2/3

    # Class-level defaults, overridden per instance.
    cython_scope = None
    language_level = None  # warn when not set but default to Py2
def __init__(self, include_directories, compiler_directives, cpp=False,
             language_level=None, options=None):
    # cython_scope is a hack, set to False by subclasses, in order to break
    # an infinite loop.
    # Better code organization would fix it.

    from . import Builtin, CythonScope
    self.modules = {"__builtin__" : Builtin.builtin_scope}
    self.cython_scope = CythonScope.create_cython_scope(self)
    self.modules["cython"] = self.cython_scope
    self.include_directories = include_directories
    self.future_directives = set()
    self.compiler_directives = compiler_directives
    self.cpp = cpp
    self.options = options

    self.pxds = {}  # full name -> node tree
    self._interned = {}  # (type(value), value, *key_args) -> interned_value

    if language_level is not None:
        self.set_language_level(language_level)

    # Set externally when gdb debug output is requested.
    self.gdb_debug_outputwriter = None
@classmethod
def from_options(cls, options):
    """Alternate constructor: build a Context from CompilationOptions."""
    include_path = options.include_path
    directives = options.compiler_directives
    return cls(include_path, directives, options.cplus,
               options.language_level, options=options)
def set_language_level(self, level):
    """Set the source language level (2, 3 or '3str') and the implied
    set of __future__ directives."""
    from .Future import print_function, unicode_literals, absolute_import, division, generator_stop

    directives = set()
    if level == '3str':
        # '3str' is level 3 semantics, but plain string literals stay
        # str: unicode_literals is deliberately not added.
        level = 3
    else:
        level = int(level)
        if level >= 3:
            directives.add(unicode_literals)

    if level >= 3:
        directives.update([print_function, absolute_import, division, generator_stop])

    self.language_level = level
    self.future_directives = directives
    if level >= 3:
        # Expose the builtins module under its Py3 name as well.
        self.modules['builtins'] = self.modules['__builtin__']
def intern_ustring(self, value, encoding=None):
    """Return a canonical EncodedString for (value, encoding), caching
    the result so equal strings share one object."""
    key = (EncodedString, value, encoding)
    try:
        return self._interned[key]
    except KeyError:
        pass
    interned = EncodedString(value)
    if encoding:
        interned.encoding = encoding
    self._interned[key] = interned
    return interned
# pipeline creation functions can now be found in Pipeline.py
def process_pxd(self, source_desc, scope, module_name):
    # Run the declaration pipeline over a .pxd file; a .pyx file used
    # as its own pxd goes through a dedicated pipeline instead.
    from . import Pipeline
    if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
        source = CompilationSource(source_desc, module_name, os.getcwd())
        result_sink = create_default_resultobj(source, self.options)
        pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
        result = Pipeline.run_pipeline(pipeline, source)
    else:
        pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
        result = Pipeline.run_pipeline(pipeline, source_desc)
    return result
def nonfatal_error(self, exc):
    # Record the error without aborting compilation; returns whatever
    # Errors.report_error returns.
    return Errors.report_error(exc)
def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1,
                absolute_fallback=True):
    # Finds and returns the module scope corresponding to
    # the given relative or absolute module name. If this
    # is the first time the module has been requested, finds
    # the corresponding .pxd file and process it.
    # If relative_to is not None, it must be a module scope,
    # and the module will first be searched for relative to
    # that module, provided its name is not a dotted name.
    debug_find_module = 0
    if debug_find_module:
        print("Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s" % (
            module_name, relative_to, pos, need_pxd))

    scope = None
    pxd_pathname = None
    if relative_to:
        if module_name:
            # from .module import ...
            qualified_name = relative_to.qualify_name(module_name)
        else:
            # from . import ...
            qualified_name = relative_to.qualified_name
            scope = relative_to
            relative_to = None
    else:
        qualified_name = module_name

    if not module_name_pattern.match(qualified_name):
        raise CompileError(pos or (module_name, 0, 0),
                           u"'%s' is not a valid module name" % module_name)

    if relative_to:
        if debug_find_module:
            print("...trying relative import")
        scope = relative_to.lookup_submodule(module_name)
        if not scope:
            pxd_pathname = self.find_pxd_file(qualified_name, pos)
            if pxd_pathname:
                scope = relative_to.find_submodule(module_name)
    if not scope:
        if debug_find_module:
            print("...trying absolute import")
        if absolute_fallback:
            qualified_name = module_name
        scope = self
        # Walk/create the scope chain for each dotted-name component.
        for name in qualified_name.split("."):
            scope = scope.find_submodule(name)

    if debug_find_module:
        print("...scope = %s" % scope)
    if not scope.pxd_file_loaded:
        if debug_find_module:
            print("...pxd not loaded")
        if not pxd_pathname:
            if debug_find_module:
                print("...looking for pxd file")
            # Only look in sys.path if we are explicitly looking
            # for a .pxd file.
            pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=need_pxd)
            if debug_find_module:
                print("......found %s" % pxd_pathname)
            if not pxd_pathname and need_pxd:
                # Set pxd_file_loaded such that we don't need to
                # look for the non-existing pxd file next time.
                scope.pxd_file_loaded = True
                package_pathname = self.search_include_directories(qualified_name, ".py", pos)
                if package_pathname and package_pathname.endswith(Utils.PACKAGE_FILES):
                    pass
                else:
                    error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep))
        if pxd_pathname:
            scope.pxd_file_loaded = True
            try:
                if debug_find_module:
                    print("Context.find_module: Parsing %s" % pxd_pathname)
                rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
                if not pxd_pathname.endswith(rel_path):
                    rel_path = pxd_pathname  # safety measure to prevent printing incorrect paths
                source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
                err, result = self.process_pxd(source_desc, scope, qualified_name)
                if err:
                    raise err
                (pxd_codenodes, pxd_scope) = result
                self.pxds[module_name] = (pxd_codenodes, pxd_scope)
            except CompileError:
                # Error was already reported; keep the (partial) scope.
                pass
    return scope
def find_pxd_file(self, qualified_name, pos, sys_path=True):
    """Locate the .pxd file for a fully-qualified module name.

    Searches the include path (and sys.path when sys_path is true);
    either a dotted filename or a file inside a package directory may
    match.  When a source position is given, the source file's own
    directory (dotted name) and its package root (plain name) are
    preferred.  Falls back to the .pyx file itself when the
    cimport_from_pyx option is active.
    """
    pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
    if pxd is not None:
        return pxd
    if Options.cimport_from_pyx:
        return self.find_pyx_file(qualified_name, pos)
    return None
def find_pyx_file(self, qualified_name, pos):
    """Locate the .pyx implementation file for a fully-qualified module
    name, using the same search rules as find_pxd_file()."""
    result = self.search_include_directories(qualified_name, ".pyx", pos)
    return result
def find_include_file(self, filename, pos):
    """Search the include directories for *filename*.

    Reports a compile error (via error()) and returns a falsy value
    when nothing is found.
    """
    path = self.search_include_directories(filename, "", pos,
                                           include=True)
    if path:
        return path
    error(pos, "'%s' not found" % filename)
    return path
def search_include_directories(self, qualified_name, suffix, pos,
                               include=False, sys_path=False):
    """Resolve a module or include name against this context's search
    path, delegating to the module-level helper of the same name."""
    dirs = self.include_directories
    if sys_path:
        dirs = dirs + sys.path
    # The directory list must be hashable (a tuple) because the helper
    # caches results via @cached_function.
    dirs = tuple(dirs + [standard_include_path])
    return search_include_directories(dirs, qualified_name,
                                      suffix, pos, include)
def find_root_package_dir(self, file_path):
    # Thin wrapper delegating to Utils.find_root_package_dir.
    return Utils.find_root_package_dir(file_path)
def check_package_dir(self, dir, package_names):
    # Delegates to Utils.check_package_dir; package_names is converted
    # to a tuple so the call is hashable/cache-friendly.
    return Utils.check_package_dir(dir, tuple(package_names))
def c_file_out_of_date(self, source_path, output_path):
    # Return 1 when the generated C file must be regenerated: it is
    # missing, or older than the source, its .pxd, or any recorded
    # cimport/include dependency; return 0 otherwise.
    if not os.path.exists(output_path):
        return 1
    c_time = Utils.modification_time(output_path)
    if Utils.file_newer_than(source_path, c_time):
        return 1
    pos = [source_path]
    pxd_path = Utils.replace_suffix(source_path, ".pxd")
    if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):
        return 1
    for kind, name in self.read_dependency_file(source_path):
        if kind == "cimport":
            dep_path = self.find_pxd_file(name, pos)
        elif kind == "include":
            dep_path = self.search_include_directories(name, "", pos)
        else:
            continue
        if dep_path and Utils.file_newer_than(dep_path, c_time):
            return 1
    return 0
def find_cimported_module_names(self, source_path):
    """Return the module names cimported by *source_path*, according to
    its recorded .dep dependency file."""
    names = []
    for kind, name in self.read_dependency_file(source_path):
        if kind == "cimport":
            names.append(name)
    return names
def is_package_dir(self, dir_path):
    # Thin wrapper delegating to Utils.is_package_dir.
    return Utils.is_package_dir(dir_path)
def read_dependency_file(self, source_path):
    """Read the recorded dependencies of *source_path* from its .dep file.

    Returns a list of [kind, name] pairs (e.g. ["cimport", "pkg.mod"]),
    or an empty tuple when no .dep file exists.
    """
    dep_path = Utils.replace_suffix(source_path, ".dep")
    if not os.path.exists(dep_path):
        return ()
    # Bug fix: the old code opened with mode "rU", which was removed in
    # Python 3.11 (universal newlines are the default in text mode), and
    # did not close the file on a read error.  A context manager handles
    # both.
    with open(dep_path) as f:
        return [line.strip().split(" ", 1)
                for line in f
                if " " in line.strip()]
def lookup_submodule(self, name):
    """Return the registered top-level module scope, or None if absent."""
    try:
        return self.modules[name]
    except KeyError:
        return None
def find_submodule(self, name):
    """Return the top-level module scope for *name*, creating and
    registering a fresh ModuleScope when none exists yet."""
    scope = self.lookup_submodule(name)
    if not scope:
        scope = ModuleScope(name,
                            parent_module=None, context=self)
        self.modules[name] = scope
    return scope
def parse(self, source_desc, scope, pxd, full_module_name):
    """Parse the given source file and return its parse tree.

    Raises CompileError if parsing reported new errors, and reports a
    positioned decoding error for undecodable sources.
    """
    if not isinstance(source_desc, FileSourceDescriptor):
        raise RuntimeError("Only file sources for code supported")
    source_filename = source_desc.filename
    scope.cpp = self.cpp
    initial_error_count = Errors.num_errors
    try:
        with Utils.open_source_file(source_filename) as f:
            from . import Parsing
            scanner = PyrexScanner(f, source_desc, source_encoding=f.encoding,
                                   scope=scope, context=self)
            tree = Parsing.p_module(scanner, pxd, full_module_name)
            if self.options.formal_grammar:
                # Optional extra pass through the formal grammar parser.
                try:
                    from ..Parser import ConcreteSyntaxTree
                except ImportError:
                    raise RuntimeError(
                        "Formal grammar can only be used with compiled Cython with an available pgen.")
                ConcreteSyntaxTree.p_module(source_filename)
    except UnicodeDecodeError as e:
        # Turn the raw decode failure into a positioned compiler error.
        raise self._report_decode_error(source_desc, e)
    if Errors.num_errors > initial_error_count:
        raise CompileError()
    return tree
def _report_decode_error(self, source_desc, exc):
    """Map a UnicodeDecodeError onto a line/column and report it."""
    encoding = exc.args[0]
    position = exc.args[2]
    msg = exc.args[-1]
    line_no = 1
    column = consumed = 0
    # Re-read with latin-1 (which never fails) purely to translate the
    # failing offset into a line/column pair for the error message.
    with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f:
        for line_no, line_text in enumerate(f, 1):
            consumed += len(line_text)
            if consumed >= position:
                column = position - (consumed - len(line_text)) + 1
                break
    return error((source_desc, line_no, column),
                 "Decoding error, missing or incorrect coding= "
                 "at top of source (cannot decode with encoding %r: %s)" % (encoding, msg))
def extract_module_name(self, path, options):
    """Derive the fully qualified module name from a source file path."""
    dir_path, filename = os.path.split(path)
    module_name, _ = os.path.splitext(filename)
    if "." in module_name:
        # Already dotted (e.g. "pkg.mod.pyx") - take it verbatim.
        return module_name
    # Otherwise walk up through enclosing package directories,
    # collecting their names innermost-first.
    names = [module_name]
    while self.is_package_dir(dir_path):
        parent, package_name = os.path.split(dir_path)
        if parent == dir_path:
            break  # reached the filesystem root
        names.append(package_name)
        dir_path = parent
    return ".".join(reversed(names))
def setup_errors(self, options, result):
    """Reset error state and open the listing file when requested."""
    Errors.reset()  # clear any remaining error state
    if options.use_listing_file:
        listing_path = Utils.replace_suffix(result.main_source_file, ".lis")
        result.listing_file = listing_path
    else:
        listing_path = None
    Errors.open_listing_file(path=listing_path,
                             echo_to_stderr=options.errors_to_stderr)
def teardown_errors(self, err, options, result):
    """Finish error reporting; invalidate the generated C file on failure."""
    source_desc = result.compilation_source.source_desc
    if not isinstance(source_desc, FileSourceDescriptor):
        raise RuntimeError("Only file sources for code supported")
    Errors.close_listing_file()
    result.num_errors = Errors.num_errors
    failed = err or result.num_errors > 0
    if failed and result.c_file:
        # Truncate the stale C file so a broken build cannot be
        # mistaken for a good one; ignore filesystem errors doing so.
        try:
            Utils.castrate_file(result.c_file, os.stat(source_desc.filename))
        except EnvironmentError:
            pass
        result.c_file = None
def get_output_filename(source_filename, cwd, options):
    """Compute the path of the C/C++ file to generate for a source file."""
    c_suffix = ".cpp" if options.cplus else ".c"
    suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)
    if not options.output_file:
        # No explicit output requested: place it next to the source.
        return suggested_file_name
    out_path = os.path.join(cwd, options.output_file)
    if os.path.isdir(out_path):
        # Explicit output directory: keep the suggested base name.
        return os.path.join(out_path, os.path.basename(suggested_file_name))
    return out_path
def create_default_resultobj(compilation_source, options):
    """Build a CompilationResult pre-populated from the source and options."""
    source_desc = compilation_source.source_desc
    result = CompilationResult()
    result.main_source_file = source_desc.filename
    result.compilation_source = compilation_source
    result.c_file = get_output_filename(source_desc.filename,
                                        compilation_source.cwd, options)
    result.embedded_metadata = options.embedded_metadata
    return result
def run_pipeline(source, options, full_module_name=None, context=None):
from . import Pipeline
# ensure that the inputs are unicode (for Python 2)
if sys.version_info[0] == 2:
source = Utils.decode_filename(source)
if full_module_name:
full_module_name = Utils.decode_filename(full_module_name)
source_ext = os.path.splitext(source)[1]
options.configure_language_defaults(source_ext[1:]) # py/pyx
if context is None:
context = Context.from_options(options)
# Set up source object
cwd = os.getcwd()
abs_path = os.path.abspath(source)
full_module_name = full_module_name or context.extract_module_name(source, options)
full_module_name = EncodedString(full_module_name)
Utils.raise_error_if_module_name_forbidden(full_module_name)
if options.relative_path_in_code_position_comments:
rel_path = full_module_name.replace('.', os.sep) + source_ext
if not abs_path.endswith(rel_path):
rel_path = source # safety measure to prevent printing incorrect paths
else:
rel_path = abs_path
source_desc = FileSourceDescriptor(abs_path, rel_path)
source = CompilationSource(source_desc, full_module_name, cwd)
# Set up result object
result = create_default_resultobj(source, options)
if options.annotate is None:
# By default, decide based on whether an html file already exists.
html_filename = os.path.splitext(result.c_file)[0] + ".html"
if os.path.exists(html_filename):
with io.open(html_filename, "r", encoding="UTF-8") as html_file:
if u'