Commit 65da9d1b authored by Adrien Guinet's avatar Adrien Guinet

Add a Pythran backend for Numpy operations

When the user asks for it (via the --np-pythran flag), use
Pythran's Numpy implementation as a backend for numpy operations. This
flag forces the C++ mode, as the Pythran implementation is written in
C++. Distutils integration through the 'np_pythran' flag of the
cythonize API is also provided.

This commit also adds a Pythran mode for the tests that enables the
pythran mode for the C++ tests, and allows reusing the Cython tests to
test the Pythran integration.
parent c5a56e4b
...@@ -16,6 +16,7 @@ except ImportError: ...@@ -16,6 +16,7 @@ except ImportError:
gzip_ext = '' gzip_ext = ''
import shutil import shutil
import subprocess import subprocess
import os
try: try:
import hashlib import hashlib
...@@ -42,6 +43,11 @@ except ImportError: ...@@ -42,6 +43,11 @@ except ImportError:
return os.path.curdir return os.path.curdir
return os.path.join(*rel_list) return os.path.join(*rel_list)
try:
import pythran
PythranAvailable = True
except:
PythranAvailable = False
from distutils.extension import Extension from distutils.extension import Extension
...@@ -775,7 +781,7 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= ...@@ -775,7 +781,7 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=
# This is the user-exposed entry point. # This is the user-exposed entry point.
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None, def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
exclude_failures=False, **options): exclude_failures=False, np_pythran=False, **options):
""" """
Compile a set of source modules into C/C++ files and return a list of distutils Compile a set of source modules into C/C++ files and return a list of distutils
Extension objects for them. Extension objects for them.
...@@ -800,6 +806,8 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, ...@@ -800,6 +806,8 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False,
that this only really makes sense for compiling .py files which can also that this only really makes sense for compiling .py files which can also
be used without compilation. be used without compilation.
To use the Pythran backend for numpy operations, set np_pythran to True.
Additional compilation options can be passed as keyword arguments. Additional compilation options can be passed as keyword arguments.
""" """
if exclude is None: if exclude is None:
...@@ -810,6 +818,13 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, ...@@ -810,6 +818,13 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False,
if options.get('cache'): if options.get('cache'):
raise NotImplementedError("common_utility_include_dir does not yet work with caching") raise NotImplementedError("common_utility_include_dir does not yet work with caching")
safe_makedirs(options['common_utility_include_dir']) safe_makedirs(options['common_utility_include_dir'])
if np_pythran:
if not PythranAvailable:
raise RuntimeError("You first need to install Pythran to use the np_pythran flag.")
pythran_options = CompilationOptions(**options);
pythran_options.cplus = True
pythran_options.np_pythran = True
pythran_include_dir = os.path.dirname(pythran.__file__)
c_options = CompilationOptions(**options) c_options = CompilationOptions(**options)
cpp_options = CompilationOptions(**options); cpp_options.cplus = True cpp_options = CompilationOptions(**options); cpp_options.cplus = True
ctx = c_options.create_context() ctx = c_options.create_context()
...@@ -845,7 +860,13 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, ...@@ -845,7 +860,13 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False,
for source in m.sources: for source in m.sources:
base, ext = os.path.splitext(source) base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'): if ext in ('.pyx', '.py'):
if m.language == 'c++': if np_pythran:
c_file = base + '.cpp'
options = pythran_options
m.include_dirs.append(pythran_include_dir)
m.extra_compile_args.extend(('-std=c++11','-DENABLE_PYTHON_MODULE','-D__PYTHRAN__'))
m.language = 'c++'
elif m.language == 'c++':
c_file = base + '.cpp' c_file = base + '.cpp'
options = cpp_options options = cpp_options
else: else:
......
...@@ -12,7 +12,6 @@ from . import PyrexTypes ...@@ -12,7 +12,6 @@ from . import PyrexTypes
from . import Naming from . import Naming
from . import Symtab from . import Symtab
def dedent(text, reindent=0): def dedent(text, reindent=0):
from textwrap import dedent from textwrap import dedent
text = dedent(text) text = dedent(text)
......
...@@ -42,6 +42,10 @@ from . import Future ...@@ -42,6 +42,10 @@ from . import Future
from ..Debugging import print_call_chain from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \ from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion debug_coercion
from .Pythran import to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type, \
is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran, \
pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type
from .PyrexTypes import PythranExpr
try: try:
from __builtin__ import basestring from __builtin__ import basestring
...@@ -113,7 +117,6 @@ coercion_error_dict = { ...@@ -113,7 +117,6 @@ coercion_error_dict = {
"Cannot convert 'char*' to unicode implicitly, decoding required"), "Cannot convert 'char*' to unicode implicitly, decoding required"),
} }
def find_coercion_error(type_tuple, default, env): def find_coercion_error(type_tuple, default, env):
err = coercion_error_dict.get(type_tuple) err = coercion_error_dict.get(type_tuple)
if err is None: if err is None:
...@@ -249,6 +252,7 @@ class ExprNode(Node): ...@@ -249,6 +252,7 @@ class ExprNode(Node):
# Cached result of subexpr_nodes() # Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc. # use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the # result_is_used boolean indicates that the result will be dropped and the
# is_numpy_attribute boolean Is a Numpy module attribute
# result_code/temp_result can safely be set to None # result_code/temp_result can safely be set to None
result_ctype = None result_ctype = None
...@@ -257,6 +261,7 @@ class ExprNode(Node): ...@@ -257,6 +261,7 @@ class ExprNode(Node):
old_temp = None # error checker for multiple frees etc. old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms use_managed_ref = True # can be set by optimisation transforms
result_is_used = True result_is_used = True
is_numpy_attribute = False
# The Analyse Expressions phase for expressions is split # The Analyse Expressions phase for expressions is split
# into two sub-phases: # into two sub-phases:
...@@ -437,6 +442,13 @@ class ExprNode(Node): ...@@ -437,6 +442,13 @@ class ExprNode(Node):
else: else:
return self.calculate_result_code() return self.calculate_result_code()
def pythran_result(self, type_=None):
if is_pythran_supported_node_or_none(self):
return to_pythran(self)
assert(type_ is not None)
return to_pythran(self, type_)
def is_c_result_required(self): def is_c_result_required(self):
""" """
Subtypes may return False here if result temp allocation can be skipped. Subtypes may return False here if result temp allocation can be skipped.
...@@ -876,6 +888,16 @@ class ExprNode(Node): ...@@ -876,6 +888,16 @@ class ExprNode(Node):
if not src.type.subtype_of(dst_type): if not src.type.subtype_of(dst_type):
if src.constant_result is not None: if src.constant_result is not None:
src = PyTypeTestNode(src, dst_type, env) src = PyTypeTestNode(src, dst_type, env)
elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
# We let the compiler decide whether this is valid
return src
elif is_pythran_expr(src.type):
if is_pythran_supported_type(dst_type):
                # Match the case where a pythran expr is assigned to a value, or vice versa.
# We let the C++ compiler decide whether this is valid or not!
return src
# Else, we need to convert the Pythran expression to a Python object
src = CoerceToPyTypeNode(src, env, type=dst_type)
elif src.type.is_pyobject: elif src.type.is_pyobject:
if used_as_reference and dst_type.is_cpp_class: if used_as_reference and dst_type.is_cpp_class:
warning( warning(
...@@ -1288,7 +1310,6 @@ class IntNode(ConstNode): ...@@ -1288,7 +1310,6 @@ class IntNode(ConstNode):
def compile_time_value(self, denv): def compile_time_value(self, denv):
return Utils.str_to_number(self.value) return Utils.str_to_number(self.value)
class FloatNode(ConstNode): class FloatNode(ConstNode):
type = PyrexTypes.c_double_type type = PyrexTypes.c_double_type
...@@ -3621,15 +3642,25 @@ class IndexNode(_IndexingBaseNode): ...@@ -3621,15 +3642,25 @@ class IndexNode(_IndexingBaseNode):
replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=self.base) replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=self.base)
else: else:
replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=self.base) replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=self.base)
elif base_type.is_buffer or base_type.is_pythran_expr:
elif base_type.is_buffer and len(indices) == base_type.ndim: if base_type.is_pythran_expr or len(indices) == base_type.ndim:
# Buffer indexing # Buffer indexing
is_buffer_access = True is_buffer_access = True
indices = [index.analyse_types(env) for index in indices] indices = [index.analyse_types(env) for index in indices]
if all(index.type.is_int for index in indices): if base_type.is_pythran_expr:
replacement_node = BufferIndexNode(self.pos, indices=indices, base=self.base) do_replacement = all(index.type.is_int or index.is_slice or index.type.is_pythran_expr for index in indices)
# On cloning, indices is cloned. Otherwise, unpack index into indices. if do_replacement:
assert not isinstance(self.index, CloneNode) for i,index in enumerate(indices):
if index.is_slice:
index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
index = index.analyse_types(env)
indices[i] = index
else:
do_replacement = all(index.type.is_int for index in indices)
if do_replacement:
replacement_node = BufferIndexNode(self.pos, indices=indices, base=self.base)
# On cloning, indices is cloned. Otherwise, unpack index into indices.
assert not isinstance(self.index, CloneNode)
if replacement_node is not None: if replacement_node is not None:
replacement_node = replacement_node.analyse_types(env, getting) replacement_node = replacement_node.analyse_types(env, getting)
...@@ -4005,7 +4036,7 @@ class BufferIndexNode(_IndexingBaseNode): ...@@ -4005,7 +4036,7 @@ class BufferIndexNode(_IndexingBaseNode):
indexing and slicing subclasses indexing and slicing subclasses
""" """
# self.indices are already analyzed # self.indices are already analyzed
if not self.base.is_name: if not self.base.is_name and not is_pythran_expr(self.base.type):
error(self.pos, "Can only index buffer variables") error(self.pos, "Can only index buffer variables")
self.type = error_type self.type = error_type
return self return self
...@@ -4024,11 +4055,14 @@ class BufferIndexNode(_IndexingBaseNode): ...@@ -4024,11 +4055,14 @@ class BufferIndexNode(_IndexingBaseNode):
return self return self
def analyse_buffer_index(self, env, getting): def analyse_buffer_index(self, env, getting):
self.base = self.base.coerce_to_simple(env) if is_pythran_expr(self.base.type):
self.type = self.base.type.dtype self.type = PythranExpr(pythran_indexing_type(self.base.type, self.indices))
else:
self.base = self.base.coerce_to_simple(env)
self.type = self.base.type.dtype
self.buffer_type = self.base.type self.buffer_type = self.base.type
if getting and self.type.is_pyobject: if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
self.is_temp = True self.is_temp = True
def analyse_assignment(self, rhs): def analyse_assignment(self, rhs):
...@@ -4061,20 +4095,21 @@ class BufferIndexNode(_IndexingBaseNode): ...@@ -4061,20 +4095,21 @@ class BufferIndexNode(_IndexingBaseNode):
base = base.arg base = base.arg
return base.type.get_entry(base) return base.type.get_entry(base)
def get_index_in_temp(self, code, ivar):
ret = code.funcstate.allocate_temp(
PyrexTypes.widest_numeric_type(
ivar.type,
PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
manage_ref=False)
code.putln("%s = %s;" % (ret, ivar.result()))
return ret
def buffer_lookup_code(self, code): def buffer_lookup_code(self, code):
""" """
ndarray[1, 2, 3] and memslice[1, 2, 3] ndarray[1, 2, 3] and memslice[1, 2, 3]
""" """
# Assign indices to temps of at least (s)size_t to allow further index calculations. # Assign indices to temps of at least (s)size_t to allow further index calculations.
index_temps = [ index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
code.funcstate.allocate_temp(
PyrexTypes.widest_numeric_type(
ivar.type, PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
manage_ref=False)
for ivar in self.indices]
for temp, index in zip(index_temps, self.indices):
code.putln("%s = %s;" % (temp, index.result()))
# Generate buffer access code using these temps # Generate buffer access code using these temps
from . import Buffer from . import Buffer
...@@ -4102,6 +4137,26 @@ class BufferIndexNode(_IndexingBaseNode): ...@@ -4102,6 +4137,26 @@ class BufferIndexNode(_IndexingBaseNode):
rhs.free_temps(code) rhs.free_temps(code)
def generate_buffer_setitem_code(self, rhs, code, op=""): def generate_buffer_setitem_code(self, rhs, code, op=""):
base_type = self.base.type
if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
            # We have to do this because we must declare Pythran objects
            # at the beginning of the function.
            # Indeed, Cython uses "goto" statements for error management, and
            # RAII doesn't work with that kind of construct.
            # Moreover, the way Pythran expressions are built means that they
            # don't support move-assignment easily.
            # Thus, we explicitly destroy and then placement-new objects in
            # this case.
code.putln("__Pyx_call_destructor(%s);" % obj)
code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
code.putln("%s(%s) %s= %s;" % (
obj,
pythran_indexing_code(self.indices),
op,
rhs.pythran_result()))
return
# Used from generate_assignment_code and InPlaceAssignmentNode # Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code) buffer_entry, ptrexpr = self.buffer_lookup_code(code)
...@@ -4123,6 +4178,15 @@ class BufferIndexNode(_IndexingBaseNode): ...@@ -4123,6 +4178,15 @@ class BufferIndexNode(_IndexingBaseNode):
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result())) code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_result_code(self, code): def generate_result_code(self, code):
if is_pythran_expr(self.base.type):
res = self.result()
code.putln("__Pyx_call_destructor(%s);" % res)
code.putln("new (&%s) decltype(%s){%s(%s)};" % (
res,
res,
self.base.pythran_result(),
pythran_indexing_code(self.indices)))
return
buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code) buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
if self.type.is_pyobject: if self.type.is_pyobject:
# is_temp is True, so must pull out value and incref it. # is_temp is True, so must pull out value and incref it.
...@@ -4142,6 +4206,7 @@ class MemoryViewIndexNode(BufferIndexNode): ...@@ -4142,6 +4206,7 @@ class MemoryViewIndexNode(BufferIndexNode):
# memoryviewslice indexing or slicing # memoryviewslice indexing or slicing
from . import MemoryView from . import MemoryView
self.is_pythran_mode = has_np_pythran(env)
indices = self.indices indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim) have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
...@@ -4528,7 +4593,7 @@ class SliceIndexNode(ExprNode): ...@@ -4528,7 +4593,7 @@ class SliceIndexNode(ExprNode):
def analyse_types(self, env, getting=True): def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env) self.base = self.base.analyse_types(env)
if self.base.type.is_memoryviewslice: if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos) none_node = NoneNode(self.pos)
index = SliceNode(self.pos, index = SliceNode(self.pos,
start=self.start or none_node, start=self.start or none_node,
...@@ -4953,6 +5018,60 @@ class SliceNode(ExprNode): ...@@ -4953,6 +5018,60 @@ class SliceNode(ExprNode):
if self.is_literal: if self.is_literal:
code.put_giveref(self.py_result()) code.put_giveref(self.py_result())
class SliceIntNode(SliceNode):
# start:stop:step in subscript list
# This is just a node to hold start,stop and step nodes that can be
# converted to integers. This does not generate a slice python object.
#
# start ExprNode
# stop ExprNode
# step ExprNode
is_temp = 0
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception as e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
self.start = self.start.analyse_types(env)
self.stop = self.stop.analyse_types(env)
self.step = self.step.analyse_types(env)
if not self.start.is_none:
self.start = self.start.coerce_to_integer(env)
if not self.stop.is_none:
self.stop = self.stop.coerce_to_integer(env)
if not self.step.is_none:
self.step = self.step.coerce_to_integer(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
def calculate_result_code(self):
pass
def generate_result_code(self, code):
for a in self.start,self.stop,self.step:
if isinstance(a, CloneNode):
a.arg.result()
class CallNode(ExprNode): class CallNode(ExprNode):
...@@ -5120,7 +5239,21 @@ class SimpleCallNode(CallNode): ...@@ -5120,7 +5239,21 @@ class SimpleCallNode(CallNode):
function.obj = CloneNode(self.self) function.obj = CloneNode(self.self)
func_type = self.function_type() func_type = self.function_type()
if func_type.is_pyobject: self.is_numpy_call_with_exprs = False
if has_np_pythran(env) and self.function.is_numpy_attribute:
has_pythran_args = True
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env)
for arg in self.arg_tuple.args:
has_pythran_args &= is_pythran_supported_node_or_none(arg)
self.is_numpy_call_with_exprs = bool(has_pythran_args)
if self.is_numpy_call_with_exprs:
self.args = None
env.add_include_file("pythonic/numpy/%s.hpp" % self.function.attribute)
self.type = PythranExpr(pythran_func_type(self.function.attribute, self.arg_tuple.args))
self.may_return_none = True
self.is_temp = 1
elif func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args) self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env) self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
self.args = None self.args = None
...@@ -5494,6 +5627,12 @@ class SimpleCallNode(CallNode): ...@@ -5494,6 +5627,12 @@ class SimpleCallNode(CallNode):
if self.has_optional_args: if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct) code.funcstate.release_temp(self.opt_arg_struct)
@classmethod
def from_node(cls, node, **kwargs):
ret = super(SimpleCallNode, cls).from_node(node, **kwargs)
ret.is_numpy_call_with_exprs = node.is_numpy_call_with_exprs
return ret
class PyMethodCallNode(SimpleCallNode): class PyMethodCallNode(SimpleCallNode):
# Specialised call to a (potential) PyMethodObject with non-constant argument tuple. # Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
...@@ -5515,6 +5654,16 @@ class PyMethodCallNode(SimpleCallNode): ...@@ -5515,6 +5654,16 @@ class PyMethodCallNode(SimpleCallNode):
for arg in args: for arg in args:
arg.generate_evaluation_code(code) arg.generate_evaluation_code(code)
if self.is_numpy_call_with_exprs:
code.putln("// function evaluation code for numpy function")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::%s{}(%s)};" % (
self.result(),
self.result(),
self.function.attribute,
", ".join(a.pythran_result() for a in self.arg_tuple.args)))
return
# make sure function is in temp so that we can replace the reference below if it's a method # make sure function is in temp so that we can replace the reference below if it's a method
reuse_function_temp = self.function.is_temp reuse_function_temp = self.function.is_temp
if reuse_function_temp: if reuse_function_temp:
...@@ -6577,6 +6726,7 @@ class AttributeNode(ExprNode): ...@@ -6577,6 +6726,7 @@ class AttributeNode(ExprNode):
self.member = self.attribute self.member = self.attribute
self.type = py_object_type self.type = py_object_type
self.is_py_attr = 1 self.is_py_attr = 1
if not obj_type.is_pyobject and not obj_type.is_error: if not obj_type.is_pyobject and not obj_type.is_error:
# Expose python methods for immutable objects. # Expose python methods for immutable objects.
if (obj_type.is_string or obj_type.is_cpp_string if (obj_type.is_string or obj_type.is_cpp_string
...@@ -9575,7 +9725,10 @@ class UnopNode(ExprNode): ...@@ -9575,7 +9725,10 @@ class UnopNode(ExprNode):
def analyse_types(self, env): def analyse_types(self, env):
self.operand = self.operand.analyse_types(env) self.operand = self.operand.analyse_types(env)
if self.is_py_operation(): if self.is_pythran_operation(env):
self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
self.is_temp = 1
elif self.is_py_operation():
self.coerce_operand_to_pyobject(env) self.coerce_operand_to_pyobject(env)
self.type = py_object_type self.type = py_object_type
self.is_temp = 1 self.is_temp = 1
...@@ -9591,6 +9744,11 @@ class UnopNode(ExprNode): ...@@ -9591,6 +9744,11 @@ class UnopNode(ExprNode):
def is_py_operation(self): def is_py_operation(self):
return self.operand.type.is_pyobject or self.operand.type.is_ctuple return self.operand.type.is_pyobject or self.operand.type.is_ctuple
def is_pythran_operation(self, env):
np_pythran = has_np_pythran(env)
op_type = self.operand.type
return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)
def nogil_check(self, env): def nogil_check(self, env):
if self.is_py_operation(): if self.is_py_operation():
self.gil_error() self.gil_error()
...@@ -9603,7 +9761,15 @@ class UnopNode(ExprNode): ...@@ -9603,7 +9761,15 @@ class UnopNode(ExprNode):
self.operand = self.operand.coerce_to_pyobject(env) self.operand = self.operand.coerce_to_pyobject(env)
def generate_result_code(self, code): def generate_result_code(self, code):
if self.operand.type.is_pyobject: if self.type.is_pythran_expr:
code.putln("// Pythran unaryop")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){%s%s};" % (
self.result(),
self.result(),
self.operator,
self.operand.pythran_result()))
elif self.operand.type.is_pyobject:
self.generate_py_operation_code(code) self.generate_py_operation_code(code)
elif self.is_temp: elif self.is_temp:
if self.is_cpp_operation() and self.exception_check == '+': if self.is_cpp_operation() and self.exception_check == '+':
...@@ -10505,7 +10671,7 @@ class BinopNode(ExprNode): ...@@ -10505,7 +10671,7 @@ class BinopNode(ExprNode):
def infer_type(self, env): def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env), return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env)) self.operand2.infer_type(env), env)
def analyse_types(self, env): def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env) self.operand1 = self.operand1.analyse_types(env)
...@@ -10514,10 +10680,15 @@ class BinopNode(ExprNode): ...@@ -10514,10 +10680,15 @@ class BinopNode(ExprNode):
return self return self
def analyse_operation(self, env): def analyse_operation(self, env):
if self.is_py_operation(): if self.is_pythran_operation(env):
self.type = self.result_type(self.operand1.type,
self.operand2.type, env)
assert self.type.is_pythran_expr
self.is_temp = 1
elif self.is_py_operation():
self.coerce_operands_to_pyobjects(env) self.coerce_operands_to_pyobjects(env)
self.type = self.result_type(self.operand1.type, self.type = self.result_type(self.operand1.type,
self.operand2.type) self.operand2.type, env)
assert self.type.is_pyobject assert self.type.is_pyobject
self.is_temp = 1 self.is_temp = 1
elif self.is_cpp_operation(): elif self.is_cpp_operation():
...@@ -10531,6 +10702,15 @@ class BinopNode(ExprNode): ...@@ -10531,6 +10702,15 @@ class BinopNode(ExprNode):
def is_py_operation_types(self, type1, type2): def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
def is_pythran_operation(self, env):
return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)
def is_pythran_operation_types(self, type1, type2, env):
# Support only expr op supported_type, or supported_type op expr
return has_np_pythran(env) and \
(is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
(is_pythran_expr(type1) or is_pythran_expr(type2))
def is_cpp_operation(self): def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class) or self.operand2.type.is_cpp_class)
...@@ -10558,7 +10738,9 @@ class BinopNode(ExprNode): ...@@ -10558,7 +10738,9 @@ class BinopNode(ExprNode):
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env) self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type self.type = func_type.return_type
def result_type(self, type1, type2): def result_type(self, type1, type2, env):
if self.is_pythran_operation_types(type1, type2, env):
return PythranExpr(pythran_binop_type(self.operator, type1, type2))
if self.is_py_operation_types(type1, type2): if self.is_py_operation_types(type1, type2):
if type2.is_string: if type2.is_string:
type2 = Builtin.bytes_type type2 = Builtin.bytes_type
...@@ -10598,8 +10780,16 @@ class BinopNode(ExprNode): ...@@ -10598,8 +10780,16 @@ class BinopNode(ExprNode):
self.operand1.is_ephemeral() or self.operand2.is_ephemeral()) self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
def generate_result_code(self, code): def generate_result_code(self, code):
#print "BinopNode.generate_result_code:", self.operand1, self.operand2 ### if self.type.is_pythran_expr:
if self.operand1.type.is_pyobject: code.putln("// Pythran binop")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){%s %s %s};" % (
self.result(),
self.result(),
self.operand1.pythran_result(),
self.operator,
self.operand2.pythran_result()))
elif self.operand1.type.is_pyobject:
function = self.py_operation_function(code) function = self.py_operation_function(code)
if self.operator == '**': if self.operator == '**':
extra_args = ", Py_None" extra_args = ", Py_None"
...@@ -10959,7 +11149,7 @@ class DivNode(NumBinopNode): ...@@ -10959,7 +11149,7 @@ class DivNode(NumBinopNode):
self._check_truedivision(env) self._check_truedivision(env)
return self.result_type( return self.result_type(
self.operand1.infer_type(env), self.operand1.infer_type(env),
self.operand2.infer_type(env)) self.operand2.infer_type(env), env)
def analyse_operation(self, env): def analyse_operation(self, env):
self._check_truedivision(env) self._check_truedivision(env)
...@@ -11957,6 +12147,14 @@ class PrimaryCmpNode(ExprNode, CmpNode): ...@@ -11957,6 +12147,14 @@ class PrimaryCmpNode(ExprNode, CmpNode):
error(self.pos, "Cascading comparison not yet supported for cpp types.") error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self return self
type1 = self.operand1.type
type2 = self.operand2.type
if is_pythran_expr(type1) or is_pythran_expr(type2):
if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
self.is_pycmp = False
return self
if self.analyse_memoryviewslice_comparison(env): if self.analyse_memoryviewslice_comparison(env):
return self return self
......
...@@ -315,19 +315,22 @@ class FusedCFuncDefNode(StatListNode): ...@@ -315,19 +315,22 @@ class FusedCFuncDefNode(StatListNode):
match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'" match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
no_match = "dest_sig[{{dest_sig_idx}}] = None" no_match = "dest_sig[{{dest_sig_idx}}] = None"
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types): def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
""" """
Match a numpy dtype object to the individual specializations. Match a numpy dtype object to the individual specializations.
""" """
self._buffer_check_numpy_dtype_setup_cases(pyx_code) self._buffer_check_numpy_dtype_setup_cases(pyx_code)
for specialized_type in specialized_buffer_types: for specialized_type in pythran_types+specialized_buffer_types:
final_type = specialized_type
if specialized_type.is_pythran_expr:
specialized_type = specialized_type.org_buffer
dtype = specialized_type.dtype dtype = specialized_type.dtype
pyx_code.context.update( pyx_code.context.update(
itemsize_match=self._sizeof_dtype(dtype) + " == itemsize", itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype), signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
dtype=dtype, dtype=dtype,
specialized_type_name=specialized_type.specialization_string) specialized_type_name=final_type.specialization_string)
dtypes = [ dtypes = [
(dtype.is_int, pyx_code.dtype_int), (dtype.is_int, pyx_code.dtype_int),
...@@ -342,8 +345,11 @@ class FusedCFuncDefNode(StatListNode): ...@@ -342,8 +345,11 @@ class FusedCFuncDefNode(StatListNode):
if dtype.is_int: if dtype.is_int:
cond += ' and {{signed_match}}' cond += ' and {{signed_match}}'
if final_type.is_pythran_expr:
cond += ' and arg_is_pythran_compatible'
if codewriter.indenter("if %s:" % cond): if codewriter.indenter("if %s:" % cond):
# codewriter.putln("print 'buffer match found based on numpy dtype'") #codewriter.putln("print 'buffer match found based on numpy dtype'")
codewriter.putln(self.match) codewriter.putln(self.match)
codewriter.putln("break") codewriter.putln("break")
codewriter.dedent() codewriter.dedent()
...@@ -388,7 +394,7 @@ class FusedCFuncDefNode(StatListNode): ...@@ -388,7 +394,7 @@ class FusedCFuncDefNode(StatListNode):
__pyx_PyErr_Clear() __pyx_PyErr_Clear()
""" % self.match) """ % self.match)
def _buffer_checks(self, buffer_types, pyx_code, decl_code, env): def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
""" """
Generate Cython code to match objects to buffer specializations. Generate Cython code to match objects to buffer specializations.
First try to get a numpy dtype object and match it against the individual First try to get a numpy dtype object and match it against the individual
...@@ -402,6 +408,7 @@ class FusedCFuncDefNode(StatListNode): ...@@ -402,6 +408,7 @@ class FusedCFuncDefNode(StatListNode):
if ndarray is not None: if ndarray is not None:
if isinstance(arg, ndarray): if isinstance(arg, ndarray):
dtype = arg.dtype dtype = arg.dtype
arg_is_pythran_compatible = True
elif __pyx_memoryview_check(arg): elif __pyx_memoryview_check(arg):
arg_base = arg.base arg_base = arg.base
if isinstance(arg_base, ndarray): if isinstance(arg_base, ndarray):
...@@ -415,11 +422,26 @@ class FusedCFuncDefNode(StatListNode): ...@@ -415,11 +422,26 @@ class FusedCFuncDefNode(StatListNode):
if dtype is not None: if dtype is not None:
itemsize = dtype.itemsize itemsize = dtype.itemsize
kind = ord(dtype.kind) kind = ord(dtype.kind)
# We only support the endianess of the current compiler
byteorder = dtype.byteorder
if byteorder == "<" and not __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
if byteorder == ">" and __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
dtype_signed = kind == 'i' dtype_signed = kind == 'i'
if arg_is_pythran_compatible:
cur_stride = itemsize
for dim,stride in zip(reversed(arg.shape),reversed(arg.strides)):
if stride != cur_stride:
arg_is_pythran_compatible = False
break
cur_stride *= dim
else:
arg_is_pythran_compatible = not (arg.flags.f_contiguous and arg.ndim > 1)
""") """)
pyx_code.indent(2) pyx_code.indent(2)
pyx_code.named_insertion_point("numpy_dtype_checks") pyx_code.named_insertion_point("numpy_dtype_checks")
self._buffer_check_numpy_dtype(pyx_code, buffer_types) self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2) pyx_code.dedent(2)
for specialized_type in buffer_types: for specialized_type in buffer_types:
...@@ -446,8 +468,10 @@ class FusedCFuncDefNode(StatListNode): ...@@ -446,8 +468,10 @@ class FusedCFuncDefNode(StatListNode):
cdef Py_ssize_t itemsize cdef Py_ssize_t itemsize
cdef bint dtype_signed cdef bint dtype_signed
cdef char kind cdef char kind
cdef bint arg_is_pythran_compatible
itemsize = -1 itemsize = -1
arg_is_pythran_compatible = False
""") """)
pyx_code.imports.put_chunk( pyx_code.imports.put_chunk(
...@@ -491,7 +515,7 @@ class FusedCFuncDefNode(StatListNode): ...@@ -491,7 +515,7 @@ class FusedCFuncDefNode(StatListNode):
specialized_types.sort() specialized_types.sort()
seen_py_type_names = set() seen_py_type_names = set()
normal_types, buffer_types = [], [] normal_types, buffer_types, pythran_types = [], [], []
has_object_fallback = False has_object_fallback = False
for specialized_type in specialized_types: for specialized_type in specialized_types:
py_type_name = specialized_type.py_type_name() py_type_name = specialized_type.py_type_name()
...@@ -503,10 +527,12 @@ class FusedCFuncDefNode(StatListNode): ...@@ -503,10 +527,12 @@ class FusedCFuncDefNode(StatListNode):
has_object_fallback = True has_object_fallback = True
else: else:
normal_types.append(specialized_type) normal_types.append(specialized_type)
elif specialized_type.is_pythran_expr:
pythran_types.append(specialized_type)
elif specialized_type.is_buffer or specialized_type.is_memoryviewslice: elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
buffer_types.append(specialized_type) buffer_types.append(specialized_type)
return normal_types, buffer_types, has_object_fallback return normal_types, buffer_types, pythran_types, has_object_fallback
def _unpack_argument(self, pyx_code): def _unpack_argument(self, pyx_code):
pyx_code.put_chunk( pyx_code.put_chunk(
...@@ -533,6 +559,8 @@ class FusedCFuncDefNode(StatListNode): ...@@ -533,6 +559,8 @@ class FusedCFuncDefNode(StatListNode):
""" """
from . import TreeFragment, Code, UtilityCode from . import TreeFragment, Code, UtilityCode
env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian","ModuleSetupCode.c"))
fused_types = self._get_fused_base_types([ fused_types = self._get_fused_base_types([
arg.type for arg in self.node.args if arg.type.is_fused]) arg.type for arg in self.node.args if arg.type.is_fused])
...@@ -549,6 +577,7 @@ class FusedCFuncDefNode(StatListNode): ...@@ -549,6 +577,7 @@ class FusedCFuncDefNode(StatListNode):
u""" u"""
cdef extern from *: cdef extern from *:
void __pyx_PyErr_Clear "PyErr_Clear" () void __pyx_PyErr_Clear "PyErr_Clear" ()
int __Pyx_Is_Little_Endian()
""") """)
decl_code.indent() decl_code.indent()
...@@ -571,8 +600,10 @@ class FusedCFuncDefNode(StatListNode): ...@@ -571,8 +600,10 @@ class FusedCFuncDefNode(StatListNode):
# instance check body # instance check body
""") """)
pyx_code.indent() # indent following code to function body pyx_code.indent() # indent following code to function body
pyx_code.named_insertion_point("imports") pyx_code.named_insertion_point("imports")
pyx_code.named_insertion_point("func_defs")
pyx_code.named_insertion_point("local_variable_declarations") pyx_code.named_insertion_point("local_variable_declarations")
fused_index = 0 fused_index = 0
...@@ -597,15 +628,15 @@ class FusedCFuncDefNode(StatListNode): ...@@ -597,15 +628,15 @@ class FusedCFuncDefNode(StatListNode):
default_idx=default_idx, default_idx=default_idx,
) )
normal_types, buffer_types, has_object_fallback = self._split_fused_types(arg) normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
self._unpack_argument(pyx_code) self._unpack_argument(pyx_code)
# 'unrolled' loop, first match breaks out of it # 'unrolled' loop, first match breaks out of it
if pyx_code.indenter("while 1:"): if pyx_code.indenter("while 1:"):
if normal_types: if normal_types:
self._fused_instance_checks(normal_types, pyx_code, env) self._fused_instance_checks(normal_types, pyx_code, env)
if buffer_types: if buffer_types or pythran_types:
self._buffer_checks(buffer_types, pyx_code, decl_code, env) self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
if has_object_fallback: if has_object_fallback:
pyx_code.context.update(specialized_type_name='object') pyx_code.context.update(specialized_type_name='object')
pyx_code.putln(self.match) pyx_code.putln(self.match)
...@@ -616,6 +647,7 @@ class FusedCFuncDefNode(StatListNode): ...@@ -616,6 +647,7 @@ class FusedCFuncDefNode(StatListNode):
fused_index += 1 fused_index += 1
all_buffer_types.update(buffer_types) all_buffer_types.update(buffer_types)
all_buffer_types.update(ty.org_buffer for ty in pythran_types)
if arg.default: if arg.default:
default_idx += 1 default_idx += 1
......
...@@ -561,6 +561,10 @@ class CompilationOptions(object): ...@@ -561,6 +561,10 @@ class CompilationOptions(object):
warnings.warn(message) warnings.warn(message)
directives = dict(options['compiler_directives']) # copy mutable field directives = dict(options['compiler_directives']) # copy mutable field
if directives.get('np_pythran', False) and not options['cplus']:
import warnings
warnings.warn("C++ mode forced when in Pythran mode!")
options['cplus'] = True
options['compiler_directives'] = directives options['compiler_directives'] = directives
if 'language_level' in directives and 'language_level' not in kw: if 'language_level' in directives and 'language_level' not in kw:
options['language_level'] = int(directives['language_level']) options['language_level'] = int(directives['language_level'])
...@@ -754,4 +758,5 @@ default_options = dict( ...@@ -754,4 +758,5 @@ default_options = dict(
build_dir=None, build_dir=None,
cache=None, cache=None,
create_extension=None, create_extension=None,
np_pythran=False
) )
...@@ -22,13 +22,14 @@ from . import Nodes ...@@ -22,13 +22,14 @@ from . import Nodes
from . import Options from . import Options
from . import TypeSlots from . import TypeSlots
from . import PyrexTypes from . import PyrexTypes
from . import Pythran
from .Errors import error, warning from .Errors import error, warning
from .PyrexTypes import py_object_type from .PyrexTypes import py_object_type
from ..Utils import open_new_file, replace_suffix, decode_filename from ..Utils import open_new_file, replace_suffix, decode_filename
from .Code import UtilityCode from .Code import UtilityCode
from .StringEncoding import EncodedString from .StringEncoding import EncodedString
from .Pythran import has_np_pythran
def check_c_declarations_pxd(module_node): def check_c_declarations_pxd(module_node):
module_node.scope.check_c_classes_pxd() module_node.scope.check_c_classes_pxd()
...@@ -103,6 +104,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): ...@@ -103,6 +104,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.scope.merge_in(scope) self.scope.merge_in(scope)
def analyse_declarations(self, env): def analyse_declarations(self, env):
if has_np_pythran(env):
Pythran.include_pythran_generic(env)
if self.directives: if self.directives:
env.old_style_globals = self.directives['old_style_globals'] env.old_style_globals = self.directives['old_style_globals']
if not Options.docstrings: if not Options.docstrings:
...@@ -115,6 +118,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): ...@@ -115,6 +118,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
else: else:
env.doc = self.doc env.doc = self.doc
env.directives = self.directives env.directives = self.directives
self.body.analyse_declarations(env) self.body.analyse_declarations(env)
def prepare_utility_code(self): def prepare_utility_code(self):
...@@ -713,6 +717,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): ...@@ -713,6 +717,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln('static const char * %s= %s;' % (Naming.cfilenm_cname, Naming.file_c_macro)) code.putln('static const char * %s= %s;' % (Naming.cfilenm_cname, Naming.file_c_macro))
code.putln('static const char *%s;' % Naming.filename_cname) code.putln('static const char *%s;' % Naming.filename_cname)
if has_np_pythran(env):
env.use_utility_code(UtilityCode.load_cached("PythranConversion", "CppSupport.cpp"))
def generate_extern_c_macro_definition(self, code): def generate_extern_c_macro_definition(self, code):
name = Naming.extern_c_macro name = Naming.extern_c_macro
code.putln("#ifndef %s" % name) code.putln("#ifndef %s" % name)
......
...@@ -28,6 +28,7 @@ from .StringEncoding import EncodedString ...@@ -28,6 +28,7 @@ from .StringEncoding import EncodedString
from . import Future from . import Future
from . import Options from . import Options
from . import DebugFlags from . import DebugFlags
from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer
from ..Utils import add_metaclass from ..Utils import add_metaclass
...@@ -1127,6 +1128,8 @@ class TemplatedTypeNode(CBaseTypeNode): ...@@ -1127,6 +1128,8 @@ class TemplatedTypeNode(CBaseTypeNode):
for name, value in options.items()]) for name, value in options.items()])
self.type = PyrexTypes.BufferType(base_type, **options) self.type = PyrexTypes.BufferType(base_type, **options)
if has_np_pythran(env) and is_pythran_buffer(self.type):
self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
else: else:
# Array # Array
...@@ -2298,7 +2301,7 @@ class CFuncDefNode(FuncDefNode): ...@@ -2298,7 +2301,7 @@ class CFuncDefNode(FuncDefNode):
if type_arg.type.is_buffer and 'inline' in self.modifiers: if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
if type_arg.type.is_buffer: if type_arg.type.is_buffer or type_arg.type.is_pythran_expr:
if self.type.nogil: if self.type.nogil:
error(formal_arg.pos, error(formal_arg.pos,
"Buffer may not be acquired without the GIL. Consider using memoryview slices instead.") "Buffer may not be acquired without the GIL. Consider using memoryview slices instead.")
...@@ -2815,6 +2818,13 @@ class DefNode(FuncDefNode): ...@@ -2815,6 +2818,13 @@ class DefNode(FuncDefNode):
name_declarator = None name_declarator = None
else: else:
base_type = arg.base_type.analyse(env) base_type = arg.base_type.analyse(env)
# If we are in Pythran mode and we got a buffer supported by
# Pythran, we change this node to a fused type
if has_np_pythran(env) and base_type.is_pythran_expr:
base_type = PyrexTypes.FusedType([
base_type,
#PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
base_type.org_buffer])
name_declarator, type = \ name_declarator, type = \
arg.declarator.analyse(base_type, env) arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name arg.name = name_declarator.name
...@@ -2855,6 +2865,11 @@ class DefNode(FuncDefNode): ...@@ -2855,6 +2865,11 @@ class DefNode(FuncDefNode):
error(arg.pos, "Only Python type arguments can have 'or None'") error(arg.pos, "Only Python type arguments can have 'or None'")
env.fused_to_specific = f2s env.fused_to_specific = f2s
if has_np_pythran(env):
self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
else:
self.np_args_idx = []
def analyse_signature(self, env): def analyse_signature(self, env):
if self.entry.is_special: if self.entry.is_special:
if self.decorators: if self.decorators:
...@@ -3144,6 +3159,8 @@ class DefNodeWrapper(FuncDefNode): ...@@ -3144,6 +3159,8 @@ class DefNodeWrapper(FuncDefNode):
self.signature = target_entry.signature self.signature = target_entry.signature
self.np_args_idx = self.target.np_args_idx
def prepare_argument_coercion(self, env): def prepare_argument_coercion(self, env):
# This is only really required for Cython utility code at this time, # This is only really required for Cython utility code at this time,
# everything else can be done during code generation. But we expand # everything else can be done during code generation. But we expand
......
...@@ -175,6 +175,7 @@ _directive_defaults = { ...@@ -175,6 +175,7 @@ _directive_defaults = {
'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types 'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
'unraisable_tracebacks': True, 'unraisable_tracebacks': True,
'old_style_globals': False, 'old_style_globals': False,
'np_pythran': False,
# set __file__ and/or __path__ to known source/target path at import time (instead of not having them available) # set __file__ and/or __path__ to known source/target path at import time (instead of not having them available)
'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module" 'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module"
...@@ -311,6 +312,7 @@ directive_scopes = { # defaults to available everywhere ...@@ -311,6 +312,7 @@ directive_scopes = { # defaults to available everywhere
# globals() could conceivably be controlled at a finer granularity, # globals() could conceivably be controlled at a finer granularity,
# but that would complicate the implementation # but that would complicate the implementation
'old_style_globals': ('module',), 'old_style_globals': ('module',),
'np_pythran': ('module',)
} }
......
...@@ -24,7 +24,6 @@ from .StringEncoding import EncodedString, _unicode ...@@ -24,7 +24,6 @@ from .StringEncoding import EncodedString, _unicode
from .Errors import error, warning, CompileError, InternalError from .Errors import error, warning, CompileError, InternalError
from .Code import UtilityCode from .Code import UtilityCode
class NameNodeCollector(TreeVisitor): class NameNodeCollector(TreeVisitor):
"""Collect all NameNodes of a (sub-)tree in the ``name_nodes`` """Collect all NameNodes of a (sub-)tree in the ``name_nodes``
attribute. attribute.
...@@ -601,6 +600,22 @@ class PxdPostParse(CythonTransform, SkipDeclarations): ...@@ -601,6 +600,22 @@ class PxdPostParse(CythonTransform, SkipDeclarations):
else: else:
return node return node
class TrackNumpyAttributes(CythonTransform, SkipDeclarations):
    """Tag attribute accesses on the numpy module (e.g. ``np.ones``) with
    ``is_numpy_attribute`` so that later transforms can recognise numpy
    operations (used by the Pythran backend)."""

    def __init__(self, context):
        super(TrackNumpyAttributes, self).__init__(context)
        # Names under which the numpy module is visible: "numpy" itself or
        # any alias introduced by "cimport numpy as <alias>".
        self.numpy_module_names = set()

    def visit_CImportStatNode(self, node):
        # Record "cimport numpy" (or its alias, if one was given).
        if node.module_name == u"numpy":
            self.numpy_module_names.add(node.as_name or u"numpy")
        return node

    def visit_AttributeNode(self, node):
        # Visit sub-expressions first, then mark attributes that are read
        # directly off one of the recorded numpy module names.
        self.visitchildren(node)
        if node.obj.is_name and node.obj.name in self.numpy_module_names:
            node.is_numpy_attribute = True
        return node
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations): class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
""" """
After parsing, directives can be stored in a number of places: After parsing, directives can be stored in a number of places:
...@@ -859,7 +874,8 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations): ...@@ -859,7 +874,8 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
if optname: if optname:
directivetype = Options.directive_types.get(optname) directivetype = Options.directive_types.get(optname)
if directivetype is bool: if directivetype is bool:
return [(optname, True)] arg = ExprNodes.BoolNode(node.pos, value=True)
return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
elif directivetype is None: elif directivetype is None:
return [(optname, None)] return [(optname, None)]
else: else:
...@@ -869,6 +885,8 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations): ...@@ -869,6 +885,8 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
def try_to_parse_directive(self, optname, args, kwds, pos): def try_to_parse_directive(self, optname, args, kwds, pos):
directivetype = Options.directive_types.get(optname) directivetype = Options.directive_types.get(optname)
if optname == 'np_pythran' and not self.context.cpp:
raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname)
if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode): if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
return optname, Options.get_directive_defaults()[optname] return optname, Options.get_directive_defaults()[optname]
elif directivetype is bool: elif directivetype is bool:
......
...@@ -145,7 +145,7 @@ def create_pipeline(context, mode, exclude_classes=()): ...@@ -145,7 +145,7 @@ def create_pipeline(context, mode, exclude_classes=()):
from .ParseTreeTransforms import ForwardDeclareTypes, AnalyseDeclarationsTransform from .ParseTreeTransforms import ForwardDeclareTypes, AnalyseDeclarationsTransform
from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
from .ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods
from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
from .ParseTreeTransforms import CalculateQualifiedNamesTransform from .ParseTreeTransforms import CalculateQualifiedNamesTransform
from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
...@@ -183,6 +183,7 @@ def create_pipeline(context, mode, exclude_classes=()): ...@@ -183,6 +183,7 @@ def create_pipeline(context, mode, exclude_classes=()):
NormalizeTree(context), NormalizeTree(context),
PostParse(context), PostParse(context),
_specific_post_parse, _specific_post_parse,
TrackNumpyAttributes(context),
InterpretCompilerDirectives(context, context.compiler_directives), InterpretCompilerDirectives(context, context.compiler_directives),
ParallelRangeTransform(context), ParallelRangeTransform(context),
AdjustDefByDirectives(context), AdjustDefByDirectives(context),
......
...@@ -186,6 +186,8 @@ class PyrexType(BaseType): ...@@ -186,6 +186,8 @@ class PyrexType(BaseType):
# is_returncode boolean Is used only to signal exceptions # is_returncode boolean Is used only to signal exceptions
# is_error boolean Is the dummy error type # is_error boolean Is the dummy error type
# is_buffer boolean Is buffer access type # is_buffer boolean Is buffer access type
# is_pythran_expr boolean Is Pythran expr
# is_numpy_buffer boolean Is Numpy array buffer
# has_attributes boolean Has C dot-selectable attributes # has_attributes boolean Has C dot-selectable attributes
# default_value string Initial value # default_value string Initial value
# entry Entry The Entry for this type # entry Entry The Entry for this type
...@@ -245,6 +247,8 @@ class PyrexType(BaseType): ...@@ -245,6 +247,8 @@ class PyrexType(BaseType):
is_buffer = 0 is_buffer = 0
is_ctuple = 0 is_ctuple = 0
is_memoryviewslice = 0 is_memoryviewslice = 0
is_pythran_expr = 0
is_numpy_buffer = 0
has_attributes = 0 has_attributes = 0
default_value = "" default_value = ""
...@@ -1008,6 +1012,7 @@ class BufferType(BaseType): ...@@ -1008,6 +1012,7 @@ class BufferType(BaseType):
self.mode = mode self.mode = mode
self.negative_indices = negative_indices self.negative_indices = negative_indices
self.cast = cast self.cast = cast
self.is_numpy_buffer = self.base.name == "ndarray"
def can_coerce_to_pyobject(self,env): def can_coerce_to_pyobject(self,env):
return True return True
...@@ -1451,6 +1456,40 @@ class CType(PyrexType): ...@@ -1451,6 +1456,40 @@ class CType(PyrexType):
source_code, source_code,
code.error_goto_if(error_condition or self.error_condition(result_code), error_pos)) code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
class PythranExpr(CType):
    # Pythran object of a given type
    # Represents a C++-level Pythran expression; both `name` and `cname`
    # are the C++ type spelling itself.
    to_py_function = "to_python_from_expr"
    is_pythran_expr = True
    writable = True
    has_attributes = 1

    def __init__(self, pythran_type, org_buffer=None):
        # org_buffer: the original Cython buffer type this expression type
        # was derived from (used elsewhere to fall back to plain buffer
        # handling, e.g. in fused dispatch and overload matching).
        self.org_buffer = org_buffer
        self.pythran_type = pythran_type
        self.name = self.pythran_type
        self.cname = self.pythran_type
        self.from_py_function = "from_python<%s>" % (self.pythran_type)
        self.scope = None

    def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0):
        # Pythran expressions only exist in generated C++, never in Pyrex.
        assert pyrex == 0
        return "%s %s" % (self.name, entity_code)

    def attributes_known(self):
        # Lazily build a scope exposing the 'shape' and 'ndim' attributes
        # of the underlying pythonic ndarray.
        if self.scope is None:
            from . import Symtab
            self.scope = scope = Symtab.CClassScope(
                    '',
                    None,
                    visibility="extern")
            scope.parent_type = self
            scope.directives = {}
            # rank 3 == long
            scope.declare_var("shape", CPtrType(CIntType(3)), None, cname="_shape", is_cdef=True)
            scope.declare_var("ndim", CIntType(3), None, cname="value", is_cdef=True)

        return True
class CConstType(BaseType): class CConstType(BaseType):
...@@ -4120,13 +4159,17 @@ def best_match(arg_types, functions, pos=None, env=None, args=None): ...@@ -4120,13 +4159,17 @@ def best_match(arg_types, functions, pos=None, env=None, args=None):
# function that takes a char *, the coercion will mean that the # function that takes a char *, the coercion will mean that the
# type will simply become bytes. We need to do this coercion # type will simply become bytes. We need to do this coercion
# manually for overloaded and fused functions # manually for overloaded and fused functions
if not assignable and src_type.is_pyobject: if not assignable:
if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string: c_src_type = None
c_src_type = dst_type.resolve() if src_type.is_pyobject:
else: if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
c_src_type = src_type.default_coerced_ctype() c_src_type = dst_type.resolve()
else:
c_src_type = src_type.default_coerced_ctype()
elif src_type.is_pythran_expr:
c_src_type = src_type.org_buffer
if c_src_type: if c_src_type is not None:
assignable = dst_type.assignable_from(c_src_type) assignable = dst_type.assignable_from(c_src_type)
if assignable: if assignable:
src_type = c_src_type src_type = c_src_type
......
from .PyrexTypes import BufferType, CType, CTypedefType, CStructOrUnionType
_pythran_var_prefix = "__pythran__"
# Pythran/Numpy specific operations
def has_np_pythran(env):
    """Return True if the 'np_pythran' directive is enabled in *env* or any
    of its enclosing scopes, False otherwise.

    Walks the scope chain via ``outer_scope``; scopes without a
    ``directives`` attribute are skipped.
    """
    while env is not None:
        if hasattr(env, "directives") and env.directives.get('np_pythran', False):
            return True
        env = env.outer_scope
    # Previously fell off the loop returning None implicitly; an explicit
    # False keeps the return type consistent (both are falsy for callers).
    return False
def is_pythran_supported_dtype(type_):
    """Check whether the buffer dtype *type_* can be handled by Pythran."""
    # Plain types are supported when numeric; typedefs are resolved to the
    # type they alias before testing.
    if not isinstance(type_, CTypedefType):
        return type_.is_numeric
    return is_pythran_supported_type(type_.typedef_base_type)
def pythran_type(Ty, ptype="ndarray"):
    """Return the pythonic C++ type name corresponding to the Cython type *Ty*.

    Buffer types map to ``pythonic::types::<ptype><dtype,ndim>``, Pythran
    expression types already carry their own C++ spelling, and plain numeric
    types map to their C name.

    Raises ValueError for any unsupported type or dtype.
    """
    if Ty.is_buffer:
        ndim, dtype = Ty.ndim, Ty.dtype
        # Order matters: typedefs are CTypes too, so the more specific
        # checks must come first (as in the original dispatch).
        if isinstance(dtype, CStructOrUnionType):
            ctype = dtype.cname
        elif isinstance(dtype, CType):
            ctype = dtype.sign_and_name()
        elif isinstance(dtype, CTypedefType):
            ctype = dtype.typedef_cname
        else:
            raise ValueError("unsupported type %s!" % str(dtype))
        return "pythonic::types::%s<%s,%d>" % (ptype, ctype, ndim)
    # NOTE: the unused local "from .PyrexTypes import PythranExpr" and the
    # unreachable "return None" after the raise were removed.
    if Ty.is_pythran_expr:
        return Ty.pythran_type
    if Ty.is_numeric:
        return Ty.sign_and_name()
    raise ValueError("unsupported pythran type %s (%s)" % (str(Ty), str(type(Ty))))
def type_remove_ref(ty):
    """Wrap the C++ type string *ty* so any reference qualifier is stripped."""
    return "typename std::remove_reference<{0}>::type".format(ty)
def pythran_binop_type(op, tA, tB):
    """C++ decltype expression for applying binary operator *op* to tA and tB."""
    lhs = pythran_type(tA)
    rhs = pythran_type(tB)
    return "decltype(std::declval<%s>() %s std::declval<%s>())" % (lhs, op, rhs)
def pythran_unaryop_type(op, type_):
    """C++ decltype expression for applying unary operator *op* to *type_*."""
    return "decltype({0}std::declval<{1}>())".format(op, pythran_type(type_))
def pythran_indexing_type(type_, indices):
    """C++ type resulting from indexing a value of *type_* with *indices*."""
    def index_code(idx):
        if idx.is_slice:
            # Slice arguments only matter for overload resolution, so dummy
            # zeros are sufficient; a step-less slice is contiguous.
            if idx.step.is_none:
                func, n = "contiguous_slice", 2
            else:
                func, n = "slice", 3
            return "pythonic::types::%s(%s)" % (func, ",".join(["0"] * n))
        if idx.type.is_int:
            return "std::declval<long>()"
        if idx.type.is_pythran_expr:
            return "std::declval<%s>()" % idx.type.pythran_type
        raise ValueError("unsupported indice type %s!" % idx.type)

    args = ",".join(map(index_code, indices))
    return type_remove_ref("decltype(std::declval<%s>()(%s))" % (pythran_type(type_), args))
def pythran_indexing_code(indices):
    """Generate the C++ argument list used to index a Pythran expression."""
    def index_code(idx):
        if idx.is_slice:
            # A step-less slice maps to the cheaper contiguous_slice type.
            if idx.step.is_none:
                func = "contiguous_slice"
                values = (idx.start, idx.stop)
            else:
                func = "slice"
                values = (idx.start, idx.stop, idx.step)
            args = ",".join(v.pythran_result() for v in values)
            return "pythonic::types::%s(%s)" % (func, args)
        if idx.type.is_int:
            return idx.result()
        if idx.type.is_pythran_expr:
            return idx.pythran_result()
        raise ValueError("unsupported indice type %s!" % str(idx.type))

    return ",".join(index_code(idx) for idx in indices)
def pythran_func_type(func, args):
    """C++ return type of calling the pythonic numpy functor *func* on *args*."""
    arg_list = ",".join("std::declval<%s>()" % pythran_type(a.type) for a in args)
    return "decltype(pythonic::numpy::functor::%s{}(%s))" % (func, arg_list)
def to_pythran(op, ptype=None):
    """Return C++ code converting the expression node *op* to a Pythran value.

    Scalars and Pythran expressions are already usable C++ values; None maps
    to pythonic's None; anything else must be a Python object and goes
    through from_python<>.
    """
    scalar_flags = ("is_pythran_expr", "is_int", "is_numeric", "is_float",
                    "is_complex")
    op_type = op.type
    if is_type(op_type, scalar_flags):
        return op.result()
    if op.is_none:
        return "pythonic::__builtin__::None"
    if ptype is None:
        ptype = pythran_type(op_type)
    assert op.type.is_pyobject
    return "from_python<%s>(%s)" % (ptype, op.py_result())
def from_pythran():
    """Name of the C++ function converting a Pythran value back to Python."""
    return "to_python"
def is_type(type_, types):
    """Return True if *type_* has a truthy attribute among the names in *types*.

    Missing attributes count as False.  Replaces a hand-rolled loop with the
    equivalent built-in ``any``.
    """
    return any(getattr(type_, attr, False) for attr in types)
def is_pythran_supported_node_or_none(node):
    """True for a literal None node, or any node whose type Pythran supports."""
    return node.is_none or is_pythran_supported_type(node.type)
def is_pythran_supported_type(type_):
    """Whether values of *type_* can cross the Cython/Pythran boundary."""
    supported_flags = (
        "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none",
        "is_complex")
    return is_type(type_, supported_flags) or is_pythran_expr(type_)
def is_pythran_supported_operation_type(type_):
    """Whether *type_* may appear as an operand of a Pythran-backed operation.

    Like is_pythran_supported_type, but None is not a valid operand.
    """
    operand_flags = (
        "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
    return is_type(type_, operand_flags) or is_pythran_expr(type_)
def is_pythran_expr(type_):
    """Whether *type_* is a PythranExpr type (returns its flag unchanged)."""
    return type_.is_pythran_expr
def is_pythran_buffer(type_):
    """Whether the buffer type *type_* can be backed by a Pythran ndarray."""
    # Requires a numpy buffer with a supported dtype, a layout Pythran can
    # address ("c" or "strided"), and no dtype cast.
    return (type_.is_numpy_buffer
            and is_pythran_supported_dtype(type_.dtype)
            and type_.mode in ("c", "strided")
            and not type_.cast)
def include_pythran_generic(env):
    """Register the Pythran headers every np_pythran module needs."""
    add = env.add_include_file
    # Core runtime and the ndarray implementation.
    add("pythonic/core.hpp")
    add("pythonic/python/core.hpp")
    add("pythonic/types/bool.hpp")
    add("pythonic/types/ndarray.hpp")
    add("<new>")  # for placement new
    # Fixed-width integer support.
    for width in (8, 16, 32, 64):
        add("pythonic/types/uint%d.hpp" % width)
        add("pythonic/types/int%d.hpp" % width)
    # Remaining scalar and container types used by generated code.
    for name in ("float", "float32", "float64", "set", "slice", "tuple", "int",
                 "long", "complex", "complex64", "complex128"):
        add("pythonic/types/%s.hpp" % name)
def include_pythran_type(env, type_):
    """Hook for registering per-type includes; currently none are required."""
def type_is_numpy(type_):
    """Return the 'is_numpy' flag of *type_*, or False when it is absent.

    Equivalent to the original hasattr-then-access pair, expressed with the
    idiomatic ``getattr`` default.
    """
    return getattr(type_, "is_numpy", False)
...@@ -1205,6 +1205,10 @@ class ModuleScope(Scope): ...@@ -1205,6 +1205,10 @@ class ModuleScope(Scope):
scope = scope.find_submodule(submodule) scope = scope.find_submodule(submodule)
return scope return scope
def generate_library_function_declarations(self, code):
    """Emit module-level library setup code.

    In np_pythran mode, numpy's C API must be initialized with
    ``import_array();`` before any pythonic ndarray code runs.
    """
    if self.directives['np_pythran']:
        code.putln("import_array();")
def lookup_submodule(self, name): def lookup_submodule(self, name):
# Return scope for submodule of this module, or None. # Return scope for submodule of this module, or None.
if '.' in name: if '.' in name:
......
...@@ -342,28 +342,28 @@ cdef extern from "numpy/arrayobject.h": ...@@ -342,28 +342,28 @@ cdef extern from "numpy/arrayobject.h":
double imag double imag
ctypedef struct npy_clongdouble: ctypedef struct npy_clongdouble:
double real long double real
double imag long double imag
ctypedef struct npy_complex64: ctypedef struct npy_complex64:
double real float real
double imag float imag
ctypedef struct npy_complex128: ctypedef struct npy_complex128:
double real double real
double imag double imag
ctypedef struct npy_complex160: ctypedef struct npy_complex160:
double real long double real
double imag long double imag
ctypedef struct npy_complex192: ctypedef struct npy_complex192:
double real long double real
double imag long double imag
ctypedef struct npy_complex256: ctypedef struct npy_complex256:
double real long double real
double imag long double imag
ctypedef struct PyArray_Dims: ctypedef struct PyArray_Dims:
npy_intp *ptr npy_intp *ptr
......
...@@ -46,3 +46,13 @@ static void __Pyx_CppExn2PyErr() { ...@@ -46,3 +46,13 @@ static void __Pyx_CppExn2PyErr() {
} }
} }
#endif #endif
/////////////// PythranConversion.proto ///////////////
// Convert a (possibly lazy) Pythran expression to a Python object: the value
// is first forced into its pythonic::returnable type (presumably the owning,
// evaluated form of T — see the Pythran docs) and then handed to pythonic's
// to_python().
template <class T>
auto to_python_from_expr(T &&value) -> decltype(to_python(
    typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type{std::forward<T>(value)}))
{
  using returnable_type = typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type;
  return to_python(returnable_type{std::forward<T>(value)});
}
...@@ -420,6 +420,19 @@ ...@@ -420,6 +420,19 @@
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
/////////////// CInitCode /////////////// /////////////// CInitCode ///////////////
...@@ -662,6 +675,22 @@ static int __Pyx_check_binary_version(void) { ...@@ -662,6 +675,22 @@ static int __Pyx_check_binary_version(void) {
return 0; return 0;
} }
/////////////// IsLittleEndian.proto ///////////////
static int __Pyx_Is_Little_Endian(void);
/////////////// IsLittleEndian ///////////////
static int __Pyx_Is_Little_Endian(void)
{
    /* Write a known 32-bit pattern and inspect the first byte in memory:
     * a little-endian machine stores the least significant byte (0x04) first. */
    union {
        uint32_t whole;
        uint8_t parts[4];
    } probe;
    probe.whole = 0x01020304;
    return probe.parts[0] == 4;
}
/////////////// Refnanny.proto /////////////// /////////////// Refnanny.proto ///////////////
#ifndef CYTHON_REFNANNY #ifndef CYTHON_REFNANNY
......
...@@ -23,6 +23,7 @@ Contents: ...@@ -23,6 +23,7 @@ Contents:
parallelism parallelism
debugging debugging
numpy_tutorial numpy_tutorial
numpy_pythran
Indices and tables Indices and tables
------------------ ------------------
......
.. highlight:: python
.. _numpy-pythran:
**************************
Pythran as a Numpy backend
**************************
Using the flag ``--np-pythran``, it is possible to use the `Pythran`_ numpy
implementation for numpy related operations. One advantage of using this
backend is that the Pythran implementation uses C++ expression templates to
save memory transfers and can benefit from the SIMD instructions of modern
CPUs.
This can lead to significant speedups in some cases, ranging from a factor of
2 up to 16, depending on the targeted CPU architecture and the original
algorithm.
Please note that this feature is experimental.
Usage example with distutils
----------------------------
You first need to install Pythran. See its `documentation
<https://pythonhosted.org/pythran/MANUAL.html>`_ for more information.
Then, simply add ``np_pythran=True`` to the ``cythonize`` call in the related
setup.py.
Here is an example:
.. code::
from distutils.core import setup
from Cython.Build import cythonize
setup(
name = "My hello app",
ext_modules = cythonize('hello_pythran.pyx', np_pythran=True)
)
.. _Pythran: https://github.com/serge-sans-paille/pythran
...@@ -487,7 +487,7 @@ class TestBuilder(object): ...@@ -487,7 +487,7 @@ class TestBuilder(object):
cleanup_workdir, cleanup_sharedlibs, cleanup_failures, cleanup_workdir, cleanup_sharedlibs, cleanup_failures,
with_pyregr, cython_only, languages, test_bugs, fork, language_level, with_pyregr, cython_only, languages, test_bugs, fork, language_level,
test_determinism, test_determinism,
common_utility_dir): common_utility_dir, pythran_dir=None):
self.rootdir = rootdir self.rootdir = rootdir
self.workdir = workdir self.workdir = workdir
self.selectors = selectors self.selectors = selectors
...@@ -504,6 +504,7 @@ class TestBuilder(object): ...@@ -504,6 +504,7 @@ class TestBuilder(object):
self.language_level = language_level self.language_level = language_level
self.test_determinism = test_determinism self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
def build_suite(self): def build_suite(self):
suite = unittest.TestSuite() suite = unittest.TestSuite()
...@@ -607,13 +608,14 @@ class TestBuilder(object): ...@@ -607,13 +608,14 @@ class TestBuilder(object):
preparse_list = tags.get('preparse', ['id']) preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, tests = [ self.build_test(test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse) expect_errors, expect_warnings, warning_errors, preparse,
self.pythran_dir if language == "cpp" else None)
for language in languages for language in languages
for preparse in preparse_list ] for preparse in preparse_list ]
return tests return tests
def build_test(self, test_class, path, workdir, module, tags, language, def build_test(self, test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse): expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language) language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir): if not os.path.exists(language_workdir):
os.makedirs(language_workdir) os.makedirs(language_workdir)
...@@ -634,7 +636,8 @@ class TestBuilder(object): ...@@ -634,7 +636,8 @@ class TestBuilder(object):
language_level=self.language_level, language_level=self.language_level,
warning_errors=warning_errors, warning_errors=warning_errors,
test_determinism=self.test_determinism, test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir) common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir)
class CythonCompileTestCase(unittest.TestCase): class CythonCompileTestCase(unittest.TestCase):
...@@ -643,7 +646,7 @@ class CythonCompileTestCase(unittest.TestCase): ...@@ -643,7 +646,7 @@ class CythonCompileTestCase(unittest.TestCase):
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False, cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False, fork=True, language_level=2, warning_errors=False,
test_determinism=False, test_determinism=False,
common_utility_dir=None): common_utility_dir=None, pythran_dir=None):
self.test_directory = test_directory self.test_directory = test_directory
self.tags = tags self.tags = tags
self.workdir = workdir self.workdir = workdir
...@@ -663,10 +666,11 @@ class CythonCompileTestCase(unittest.TestCase): ...@@ -663,10 +666,11 @@ class CythonCompileTestCase(unittest.TestCase):
self.warning_errors = warning_errors self.warning_errors = warning_errors
self.test_determinism = test_determinism self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
unittest.TestCase.__init__(self) unittest.TestCase.__init__(self)
def shortDescription(self): def shortDescription(self):
return "compiling (%s) %s" % (self.language, self.name) return "compiling (%s%s) %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def setUp(self): def setUp(self):
from Cython.Compiler import Options from Cython.Compiler import Options
...@@ -846,6 +850,7 @@ class CythonCompileTestCase(unittest.TestCase): ...@@ -846,6 +850,7 @@ class CythonCompileTestCase(unittest.TestCase):
annotate = annotate, annotate = annotate,
use_listing_file = False, use_listing_file = False,
cplus = self.language == 'cpp', cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level, language_level = self.language_level,
generate_pxi = False, generate_pxi = False,
evaluate_tree_assertions = True, evaluate_tree_assertions = True,
...@@ -875,6 +880,9 @@ class CythonCompileTestCase(unittest.TestCase): ...@@ -875,6 +880,9 @@ class CythonCompileTestCase(unittest.TestCase):
if extra_extension_args is None: if extra_extension_args is None:
extra_extension_args = {} extra_extension_args = {}
if self.pythran_dir is not None:
ext_compile_flags.extend(['-I',self.pythran_dir,'-DENABLE_PYTHON_MODULE','-std=c++11','-D__PYTHRAN__=2','-Wno-cpp'])
related_files = self.related_files(test_directory, module) related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files) self.copy_files(test_directory, workdir, related_files)
...@@ -1044,7 +1052,7 @@ class CythonRunTestCase(CythonCompileTestCase): ...@@ -1044,7 +1052,7 @@ class CythonRunTestCase(CythonCompileTestCase):
if self.cython_only: if self.cython_only:
return CythonCompileTestCase.shortDescription(self) return CythonCompileTestCase.shortDescription(self)
else: else:
return "compiling (%s) and running %s" % (self.language, self.name) return "compiling (%s%s) and running %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def run(self, result=None): def run(self, result=None):
if result is None: if result is None:
...@@ -1839,6 +1847,8 @@ def main(): ...@@ -1839,6 +1847,8 @@ def main():
parser.add_option("--use_formal_grammar", default=False, action="store_true") parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true", parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic") help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
options, cmd_args = parser.parse_args(args) options, cmd_args = parser.parse_args(args)
...@@ -2095,7 +2105,7 @@ def runtests(options, cmd_args, coverage=None): ...@@ -2095,7 +2105,7 @@ def runtests(options, cmd_args, coverage=None):
options.cython_only, languages, test_bugs, options.cython_only, languages, test_bugs,
options.fork, options.language_level, options.fork, options.language_level,
options.test_determinism, options.test_determinism,
common_utility_dir) common_utility_dir, options.pythran_dir)
test_suite.addTest(filetests.build_suite()) test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages: if options.system_pyregr and languages:
...@@ -2110,7 +2120,7 @@ def runtests(options, cmd_args, coverage=None): ...@@ -2110,7 +2120,7 @@ def runtests(options, cmd_args, coverage=None):
options.cython_only, languages, test_bugs, options.cython_only, languages, test_bugs,
options.fork, sys.version_info[0], options.fork, sys.version_info[0],
options.test_determinism, options.test_determinism,
common_utility_dir) common_utility_dir, options.pythran_dir)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir) sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr')) test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment