Commit 6030d91c authored by scoder, committed by GitHub

Merge pull request #1607 from aguinet/feature/pythran

Add a Pythran backend for Numpy operations
parents 0d6be442 8d727b67
......@@ -16,6 +16,7 @@ except ImportError:
gzip_ext = ''
import shutil
import subprocess
import os
try:
import hashlib
......@@ -42,8 +43,14 @@ except ImportError:
return os.path.curdir
return os.path.join(*rel_list)
try:
import pythran
PythranAvailable = True
except ImportError:
PythranAvailable = False
from distutils.extension import Extension
from distutils.util import strtobool
from .. import Utils
from ..Utils import (cached_function, cached_method, path_exists,
......@@ -157,6 +164,7 @@ def parse_list(s):
transitive_str = object()
transitive_list = object()
bool_or = object()
distutils_settings = {
'name': str,
......@@ -173,6 +181,7 @@ distutils_settings = {
'export_symbols': list,
'depends': transitive_list,
'language': transitive_str,
'np_pythran': bool_or
}
......@@ -204,19 +213,23 @@ class DistutilsInfo(object):
if line[0] != '#':
break
line = line[1:].lstrip()
if line[:10] == 'distutils:':
key, _, value = [s.strip() for s in line[10:].partition('=')]
type = distutils_settings[key]
kind = next((k for k in ("distutils:","cython:") if line.startswith(k)), None)
if kind is not None:
key, _, value = [s.strip() for s in line[len(kind):].partition('=')]
type = distutils_settings.get(key, None)
if kind == "cython:" and type is None: continue
if type in (list, transitive_list):
value = parse_list(value)
if key == 'define_macros':
value = [tuple(macro.split('=', 1))
if '=' in macro else (macro, None)
for macro in value]
if type is bool_or:
value = strtobool(value)
self.values[key] = value
elif exn is not None:
for key in distutils_settings:
if key in ('name', 'sources'):
if key in ('name', 'sources','np_pythran'):
continue
value = getattr(exn, key, None)
if value:
......@@ -238,6 +251,8 @@ class DistutilsInfo(object):
all.append(v)
value = all
self.values[key] = value
elif type is bool_or:
self.values[key] = self.values.get(key, False) | value
return self
def subs(self, aliases):
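For context, the parsing above now recognizes both "# distutils:" and "# cython:" magic comments at the top of a source file, keeping only those "cython:" keys that also appear in distutils_settings (such as np_pythran). A minimal sketch of a header that exercises the new path (contents illustrative, not part of this change):
# distutils: language = c++
# cython: np_pythran=True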
......@@ -788,8 +803,30 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=
if ext_language and 'language' not in kwds:
kwds['language'] = ext_language
np_pythran = kwds.pop('np_pythran', False)
# Create the new extension
m, metadata = create_extension(template, kwds)
if np_pythran:
if not PythranAvailable:
raise RuntimeError("You first need to install Pythran to use the np_pythran directive.")
pythran_ext = pythran.config.make_extension()
m.include_dirs.extend(pythran_ext['include_dirs'])
m.extra_compile_args.extend(pythran_ext['extra_compile_args'])
m.extra_link_args.extend(pythran_ext['extra_link_args'])
m.define_macros.extend(pythran_ext['define_macros'])
m.undef_macros.extend(pythran_ext['undef_macros'])
m.library_dirs.extend(pythran_ext['library_dirs'])
m.libraries.extend(pythran_ext['libraries'])
# These options are not compatible with the way normal Cython extensions work
try:
m.extra_compile_args.remove("-fwhole-program")
except ValueError: pass
try:
m.extra_compile_args.remove("-fvisibility=hidden")
except ValueError: pass
m.language = 'c++'
m.np_pythran = np_pythran
module_list.append(m)
# Store metadata (this will be written as JSON in the
......@@ -841,6 +878,11 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False,
if options.get('cache'):
raise NotImplementedError("common_utility_include_dir does not yet work with caching")
safe_makedirs(options['common_utility_include_dir'])
if PythranAvailable:
pythran_options = CompilationOptions(**options)
pythran_options.cplus = True
pythran_options.np_pythran = True
pythran_include_dir = os.path.dirname(pythran.__file__)
c_options = CompilationOptions(**options)
cpp_options = CompilationOptions(**options); cpp_options.cplus = True
ctx = c_options.create_context()
......@@ -876,7 +918,10 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False,
for source in m.sources:
base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'):
if m.language == 'c++':
if m.np_pythran:
c_file = base + '.cpp'
options = pythran_options
elif m.language == 'c++':
c_file = base + '.cpp'
options = cpp_options
else:
......
......@@ -12,7 +12,6 @@ from . import PyrexTypes
from . import Naming
from . import Symtab
def dedent(text, reindent=0):
from textwrap import dedent
text = dedent(text)
......
This diff is collapsed.
......@@ -315,19 +315,22 @@ class FusedCFuncDefNode(StatListNode):
match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
no_match = "dest_sig[{{dest_sig_idx}}] = None"
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types):
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
"""
Match a numpy dtype object to the individual specializations.
"""
self._buffer_check_numpy_dtype_setup_cases(pyx_code)
for specialized_type in specialized_buffer_types:
for specialized_type in pythran_types+specialized_buffer_types:
final_type = specialized_type
if specialized_type.is_pythran_expr:
specialized_type = specialized_type.org_buffer
dtype = specialized_type.dtype
pyx_code.context.update(
itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
dtype=dtype,
specialized_type_name=specialized_type.specialization_string)
specialized_type_name=final_type.specialization_string)
dtypes = [
(dtype.is_int, pyx_code.dtype_int),
......@@ -342,8 +345,11 @@ class FusedCFuncDefNode(StatListNode):
if dtype.is_int:
cond += ' and {{signed_match}}'
if final_type.is_pythran_expr:
cond += ' and arg_is_pythran_compatible'
if codewriter.indenter("if %s:" % cond):
# codewriter.putln("print 'buffer match found based on numpy dtype'")
#codewriter.putln("print 'buffer match found based on numpy dtype'")
codewriter.putln(self.match)
codewriter.putln("break")
codewriter.dedent()
......@@ -388,7 +394,7 @@ class FusedCFuncDefNode(StatListNode):
__pyx_PyErr_Clear()
""" % self.match)
def _buffer_checks(self, buffer_types, pyx_code, decl_code, env):
def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
"""
Generate Cython code to match objects to buffer specializations.
First try to get a numpy dtype object and match it against the individual
......@@ -402,6 +408,7 @@ class FusedCFuncDefNode(StatListNode):
if ndarray is not None:
if isinstance(arg, ndarray):
dtype = arg.dtype
arg_is_pythran_compatible = True
elif __pyx_memoryview_check(arg):
arg_base = arg.base
if isinstance(arg_base, ndarray):
......@@ -415,11 +422,26 @@ class FusedCFuncDefNode(StatListNode):
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
# We only support arrays whose byte order matches the native endianness of the platform
byteorder = dtype.byteorder
if byteorder == "<" and not __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
if byteorder == ">" and __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
dtype_signed = kind == 'i'
if arg_is_pythran_compatible:
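# Require a tightly packed C-contiguous layout: walking from the innermost
# dimension outward, each stride must equal the product of the inner
# extents and the itemsize.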
cur_stride = itemsize
for dim,stride in zip(reversed(arg.shape),reversed(arg.strides)):
if stride != cur_stride:
arg_is_pythran_compatible = False
break
cur_stride *= dim
else:
arg_is_pythran_compatible = not (arg.flags.f_contiguous and arg.ndim > 1)
""")
pyx_code.indent(2)
pyx_code.named_insertion_point("numpy_dtype_checks")
self._buffer_check_numpy_dtype(pyx_code, buffer_types)
self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2)
for specialized_type in buffer_types:
......@@ -446,8 +468,10 @@ class FusedCFuncDefNode(StatListNode):
cdef Py_ssize_t itemsize
cdef bint dtype_signed
cdef char kind
cdef bint arg_is_pythran_compatible
itemsize = -1
arg_is_pythran_compatible = False
""")
pyx_code.imports.put_chunk(
......@@ -487,7 +511,7 @@ class FusedCFuncDefNode(StatListNode):
specialized_types.sort()
seen_py_type_names = set()
normal_types, buffer_types = [], []
normal_types, buffer_types, pythran_types = [], [], []
has_object_fallback = False
for specialized_type in specialized_types:
py_type_name = specialized_type.py_type_name()
......@@ -499,10 +523,12 @@ class FusedCFuncDefNode(StatListNode):
has_object_fallback = True
else:
normal_types.append(specialized_type)
elif specialized_type.is_pythran_expr:
pythran_types.append(specialized_type)
elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
buffer_types.append(specialized_type)
return normal_types, buffer_types, has_object_fallback
return normal_types, buffer_types, pythran_types, has_object_fallback
def _unpack_argument(self, pyx_code):
pyx_code.put_chunk(
......@@ -534,6 +560,8 @@ class FusedCFuncDefNode(StatListNode):
"""
from . import TreeFragment, Code, UtilityCode
env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian","ModuleSetupCode.c"))
fused_types = self._get_fused_base_types([
arg.type for arg in self.node.args if arg.type.is_fused])
......@@ -555,6 +583,7 @@ class FusedCFuncDefNode(StatListNode):
cdef extern from *:
void __pyx_PyErr_Clear "PyErr_Clear" ()
type __Pyx_ImportNumPyArrayTypeIfAvailable()
int __Pyx_Is_Little_Endian()
""")
decl_code.indent()
......@@ -573,8 +602,10 @@ class FusedCFuncDefNode(StatListNode):
# instance check body
""")
pyx_code.indent() # indent following code to function body
pyx_code.named_insertion_point("imports")
pyx_code.named_insertion_point("func_defs")
pyx_code.named_insertion_point("local_variable_declarations")
fused_index = 0
......@@ -599,15 +630,15 @@ class FusedCFuncDefNode(StatListNode):
default_idx=default_idx,
)
normal_types, buffer_types, has_object_fallback = self._split_fused_types(arg)
normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
self._unpack_argument(pyx_code)
# 'unrolled' loop, first match breaks out of it
if pyx_code.indenter("while 1:"):
if normal_types:
self._fused_instance_checks(normal_types, pyx_code, env)
if buffer_types:
self._buffer_checks(buffer_types, pyx_code, decl_code, env)
if buffer_types or pythran_types:
self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
if has_object_fallback:
pyx_code.context.update(specialized_type_name='object')
pyx_code.putln(self.match)
......@@ -618,6 +649,7 @@ class FusedCFuncDefNode(StatListNode):
fused_index += 1
all_buffer_types.update(buffer_types)
all_buffer_types.update(ty.org_buffer for ty in pythran_types)
if arg.default:
default_idx += 1
......
......@@ -561,6 +561,10 @@ class CompilationOptions(object):
warnings.warn(message)
directives = dict(options['compiler_directives']) # copy mutable field
if directives.get('np_pythran', False) and not options['cplus']:
import warnings
warnings.warn("C++ mode forced when in Pythran mode!")
options['cplus'] = True
options['compiler_directives'] = directives
if 'language_level' in directives and 'language_level' not in kw:
options['language_level'] = int(directives['language_level'])
......@@ -754,4 +758,5 @@ default_options = dict(
build_dir=None,
cache=None,
create_extension=None,
np_pythran=False
)
......@@ -22,13 +22,14 @@ from . import Nodes
from . import Options
from . import TypeSlots
from . import PyrexTypes
from . import Pythran
from .Errors import error, warning
from .PyrexTypes import py_object_type
from ..Utils import open_new_file, replace_suffix, decode_filename
from .Code import UtilityCode
from .StringEncoding import EncodedString
from .Pythran import has_np_pythran
def check_c_declarations_pxd(module_node):
module_node.scope.check_c_classes_pxd()
......@@ -103,6 +104,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.scope.merge_in(scope)
def analyse_declarations(self, env):
if has_np_pythran(env):
Pythran.include_pythran_generic(env)
if self.directives:
env.old_style_globals = self.directives['old_style_globals']
if not Options.docstrings:
......@@ -115,6 +118,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
else:
env.doc = self.doc
env.directives = self.directives
self.body.analyse_declarations(env)
def prepare_utility_code(self):
......@@ -713,6 +717,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln('static const char * %s= %s;' % (Naming.cfilenm_cname, Naming.file_c_macro))
code.putln('static const char *%s;' % Naming.filename_cname)
if has_np_pythran(env):
env.use_utility_code(UtilityCode.load_cached("PythranConversion", "CppSupport.cpp"))
def generate_extern_c_macro_definition(self, code):
name = Naming.extern_c_macro
code.putln("#ifndef %s" % name)
......
......@@ -28,6 +28,7 @@ from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer
from ..Utils import add_metaclass
......@@ -1127,6 +1128,8 @@ class TemplatedTypeNode(CBaseTypeNode):
for name, value in options.items()])
self.type = PyrexTypes.BufferType(base_type, **options)
if has_np_pythran(env) and is_pythran_buffer(self.type):
self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
else:
# Array
......@@ -2298,7 +2301,7 @@ class CFuncDefNode(FuncDefNode):
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
if type_arg.type.is_buffer:
if type_arg.type.is_buffer or type_arg.type.is_pythran_expr:
if self.type.nogil:
error(formal_arg.pos,
"Buffer may not be acquired without the GIL. Consider using memoryview slices instead.")
......@@ -2817,6 +2820,13 @@ class DefNode(FuncDefNode):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
# If we are in Pythran mode and the declared buffer type was mapped to a
# Pythran expression type, turn this argument into a fused type combining
# the Pythran expression and the original buffer.
if has_np_pythran(env) and base_type.is_pythran_expr:
base_type = PyrexTypes.FusedType([
base_type,
#PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
base_type.org_buffer])
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
......@@ -2857,6 +2867,11 @@ class DefNode(FuncDefNode):
error(arg.pos, "Only Python type arguments can have 'or None'")
env.fused_to_specific = f2s
if has_np_pythran(env):
self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
else:
self.np_args_idx = []
def analyse_signature(self, env):
if self.entry.is_special:
if self.decorators:
......@@ -3146,6 +3161,8 @@ class DefNodeWrapper(FuncDefNode):
self.signature = target_entry.signature
self.np_args_idx = self.target.np_args_idx
def prepare_argument_coercion(self, env):
# This is only really required for Cython utility code at this time,
# everything else can be done during code generation. But we expand
......
......@@ -175,6 +175,7 @@ _directive_defaults = {
'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
'unraisable_tracebacks': True,
'old_style_globals': False,
'np_pythran': False,
# set __file__ and/or __path__ to known source/target path at import time (instead of not having them available)
'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module"
......@@ -311,6 +312,7 @@ directive_scopes = { # defaults to available everywhere
# globals() could conceivably be controlled at a finer granularity,
# but that would complicate the implementation
'old_style_globals': ('module',),
'np_pythran': ('module',)
}
......
......@@ -24,7 +24,6 @@ from .StringEncoding import EncodedString, _unicode
from .Errors import error, warning, CompileError, InternalError
from .Code import UtilityCode
class NameNodeCollector(TreeVisitor):
"""Collect all NameNodes of a (sub-)tree in the ``name_nodes``
attribute.
......@@ -601,6 +600,22 @@ class PxdPostParse(CythonTransform, SkipDeclarations):
else:
return node
class TrackNumpyAttributes(CythonTransform, SkipDeclarations):
def __init__(self, context):
super(TrackNumpyAttributes, self).__init__(context)
self.numpy_module_names = set()
def visit_CImportStatNode(self, node):
if node.module_name == u"numpy":
self.numpy_module_names.add(node.as_name or u"numpy")
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
if node.obj.is_name and node.obj.name in self.numpy_module_names:
node.is_numpy_attribute = True
return node
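A sketch of the user-code pattern this transform recognizes (names illustrative):
cimport numpy as np    # visit_CImportStatNode records u"np" in numpy_module_names
y = np.sqrt(x)         # visit_AttributeNode marks the "np.sqrt" AttributeNode with is_numpy_attribute = True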
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
"""
After parsing, directives can be stored in a number of places:
......@@ -859,7 +874,8 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype is bool:
return [(optname, True)]
arg = ExprNodes.BoolNode(node.pos, value=True)
return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
elif directivetype is None:
return [(optname, None)]
else:
......@@ -869,6 +885,8 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
def try_to_parse_directive(self, optname, args, kwds, pos):
directivetype = Options.directive_types.get(optname)
if optname == 'np_pythran' and not self.context.cpp:
raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname)
if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
return optname, Options.get_directive_defaults()[optname]
elif directivetype is bool:
......
......@@ -145,7 +145,7 @@ def create_pipeline(context, mode, exclude_classes=()):
from .ParseTreeTransforms import ForwardDeclareTypes, AnalyseDeclarationsTransform
from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
from .ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods
from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods
from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
from .ParseTreeTransforms import CalculateQualifiedNamesTransform
from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
......@@ -183,6 +183,7 @@ def create_pipeline(context, mode, exclude_classes=()):
NormalizeTree(context),
PostParse(context),
_specific_post_parse,
TrackNumpyAttributes(context),
InterpretCompilerDirectives(context, context.compiler_directives),
ParallelRangeTransform(context),
AdjustDefByDirectives(context),
......
......@@ -186,6 +186,8 @@ class PyrexType(BaseType):
# is_returncode boolean Is used only to signal exceptions
# is_error boolean Is the dummy error type
# is_buffer boolean Is buffer access type
# is_pythran_expr boolean Is Pythran expr
# is_numpy_buffer boolean Is Numpy array buffer
# has_attributes boolean Has C dot-selectable attributes
# default_value string Initial value
# entry Entry The Entry for this type
......@@ -245,6 +247,8 @@ class PyrexType(BaseType):
is_buffer = 0
is_ctuple = 0
is_memoryviewslice = 0
is_pythran_expr = 0
is_numpy_buffer = 0
has_attributes = 0
default_value = ""
......@@ -1008,6 +1012,7 @@ class BufferType(BaseType):
self.mode = mode
self.negative_indices = negative_indices
self.cast = cast
self.is_numpy_buffer = self.base.name == "ndarray"
def can_coerce_to_pyobject(self,env):
return True
......@@ -1451,6 +1456,40 @@ class CType(PyrexType):
source_code,
code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
class PythranExpr(CType):
# Pythran object of a given type
to_py_function = "to_python_from_expr"
is_pythran_expr = True
writable = True
has_attributes = 1
def __init__(self, pythran_type, org_buffer=None):
self.org_buffer = org_buffer
self.pythran_type = pythran_type
self.name = self.pythran_type
self.cname = self.pythran_type
self.from_py_function = "from_python<%s>" % (self.pythran_type)
self.scope = None
def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0):
assert pyrex == 0
return "%s %s" % (self.name, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern")
scope.parent_type = self
scope.directives = {}
# rank 3 == long
scope.declare_var("shape", CPtrType(CIntType(3)), None, cname="_shape", is_cdef=True)
scope.declare_var("ndim", CIntType(3), None, cname="value", is_cdef=True)
return True
class CConstType(BaseType):
......@@ -4127,13 +4166,17 @@ def best_match(arg_types, functions, pos=None, env=None, args=None):
# function that takes a char *, the coercion will mean that the
# type will simply become bytes. We need to do this coercion
# manually for overloaded and fused functions
if not assignable and src_type.is_pyobject:
if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
c_src_type = dst_type.resolve()
else:
c_src_type = src_type.default_coerced_ctype()
if not assignable:
c_src_type = None
if src_type.is_pyobject:
if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
c_src_type = dst_type.resolve()
else:
c_src_type = src_type.default_coerced_ctype()
elif src_type.is_pythran_expr:
c_src_type = src_type.org_buffer
if c_src_type:
if c_src_type is not None:
assignable = dst_type.assignable_from(c_src_type)
if assignable:
src_type = c_src_type
......
from .PyrexTypes import BufferType, CType, CTypedefType, CStructOrUnionType
_pythran_var_prefix = "__pythran__"
# Pythran/Numpy specific operations
def has_np_pythran(env):
while env is not None:
if hasattr(env, "directives") and env.directives.get('np_pythran', False):
return True
env = env.outer_scope
def is_pythran_supported_dtype(type_):
if isinstance(type_, CTypedefType):
return is_pythran_supported_type(type_.typedef_base_type)
return type_.is_numeric
def pythran_type(Ty,ptype="ndarray"):
if Ty.is_buffer:
ndim,dtype = Ty.ndim, Ty.dtype
if isinstance(dtype, CStructOrUnionType):
ctype = dtype.cname
elif isinstance(dtype, CType):
ctype = dtype.sign_and_name()
elif isinstance(dtype, CTypedefType):
ctype = dtype.typedef_cname
else:
raise ValueError("unsupported type %s!" % str(dtype))
return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim)
from .PyrexTypes import PythranExpr
if Ty.is_pythran_expr:
return Ty.pythran_type
#if Ty.is_none:
# return "decltype(pythonic::__builtin__::None)"
if Ty.is_numeric:
return Ty.sign_and_name()
raise ValueError("unsupported pythran type %s (%s)" % (str(Ty), str(type(Ty))))
return None
def type_remove_ref(ty):
return "typename std::remove_reference<%s>::type" % ty
def pythran_binop_type(op, tA, tB):
return "decltype(std::declval<%s>() %s std::declval<%s>())" % \
(pythran_type(tA), op, pythran_type(tB))
def pythran_unaryop_type(op, type_):
return "decltype(%sstd::declval<%s>())" % (
op, pythran_type(type_))
def pythran_indexing_type(type_, indices):
def index_code(idx):
if idx.is_slice:
if idx.step.is_none:
func = "contiguous_slice"
n = 2
else:
func = "slice"
n = 3
return "pythonic::types::%s(%s)" % (func,",".join(["0"]*n))
elif idx.type.is_int:
return "std::declval<long>()"
elif idx.type.is_pythran_expr:
return "std::declval<%s>()" % idx.type.pythran_type
raise ValueError("unsupported index type %s!" % idx.type)
indexing = ",".join(index_code(idx) for idx in indices)
return type_remove_ref("decltype(std::declval<%s>()(%s))" % (pythran_type(type_), indexing))
def pythran_indexing_code(indices):
def index_code(idx):
if idx.is_slice:
values = idx.start, idx.stop, idx.step
if idx.step.is_none:
func = "contiguous_slice"
values = values[:2]
else:
func = "slice"
return "pythonic::types::%s(%s)" % (func,",".join((v.pythran_result() for v in values)))
elif idx.type.is_int:
return idx.result()
elif idx.type.is_pythran_expr:
return idx.pythran_result()
raise ValueError("unsupported index type %s!" % str(idx.type))
return ",".join(index_code(idx) for idx in indices)
def pythran_func_type(func, args):
args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args))
return "decltype(pythonic::numpy::functor::%s{}(%s))" % (func, args)
def to_pythran(op,ptype=None):
op_type = op.type
if is_type(op_type,["is_pythran_expr", "is_int", "is_numeric", "is_float",
"is_complex"]):
return op.result()
if op.is_none:
return "pythonic::__builtin__::None"
if ptype is None:
ptype = pythran_type(op_type)
assert(op.type.is_pyobject)
return "from_python<%s>(%s)" % (ptype, op.py_result())
def from_pythran():
return "to_python"
def is_type(type_, types):
for attr in types:
if getattr(type_, attr, False):
return True
return False
def is_pythran_supported_node_or_none(node):
return node.is_none or is_pythran_supported_type(node.type)
def is_pythran_supported_type(type_):
pythran_supported = (
"is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none",
"is_complex")
return is_type(type_, pythran_supported) or is_pythran_expr(type_)
def is_pythran_supported_operation_type(type_):
pythran_supported = (
"is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
return is_type(type_,pythran_supported) or is_pythran_expr(type_)
def is_pythran_expr(type_):
return type_.is_pythran_expr
def is_pythran_buffer(type_):
return type_.is_numpy_buffer and is_pythran_supported_dtype(type_.dtype) and \
type_.mode in ("c","strided") and not type_.cast
def include_pythran_generic(env):
# Generic files
env.add_include_file("pythonic/core.hpp")
env.add_include_file("pythonic/python/core.hpp")
env.add_include_file("pythonic/types/bool.hpp")
env.add_include_file("pythonic/types/ndarray.hpp")
env.add_include_file("<new>") # for placement new
for i in (8,16,32,64):
env.add_include_file("pythonic/types/uint%d.hpp" % i)
env.add_include_file("pythonic/types/int%d.hpp" % i)
for t in ("float", "float32", "float64", "set", "slice", "tuple", "int",
"long", "complex", "complex64", "complex128"):
env.add_include_file("pythonic/types/%s.hpp" % t)
def include_pythran_type(env, type_):
pass
def type_is_numpy(type_):
if not hasattr(type_, "is_numpy"):
return False
return type_.is_numpy
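To make the string builders above concrete, a sketch of what they emit for a buffer equivalent to np.ndarray[double, ndim=2] (values read off the format strings in pythran_type and pythran_binop_type, not taken from generated output):
# pythran_type(buf) -> "pythonic::types::ndarray<double,2>"
# pythran_binop_type('+', buf, buf)
#     -> "decltype(std::declval<pythonic::types::ndarray<double,2>>() + std::declval<pythonic::types::ndarray<double,2>>())"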
......@@ -1205,6 +1205,10 @@ class ModuleScope(Scope):
scope = scope.find_submodule(submodule)
return scope
def generate_library_function_declarations(self, code):
if self.directives['np_pythran']:
code.putln("import_array();")
def lookup_submodule(self, name):
# Return scope for submodule of this module, or None.
if '.' in name:
......
......@@ -342,28 +342,28 @@ cdef extern from "numpy/arrayobject.h":
double imag
ctypedef struct npy_clongdouble:
double real
double imag
long double real
long double imag
ctypedef struct npy_complex64:
double real
double imag
float real
float imag
ctypedef struct npy_complex128:
double real
double imag
ctypedef struct npy_complex160:
double real
double imag
long double real
long double imag
ctypedef struct npy_complex192:
double real
double imag
long double real
long double imag
ctypedef struct npy_complex256:
double real
double imag
long double real
long double imag
ctypedef struct PyArray_Dims:
npy_intp *ptr
......
......@@ -46,3 +46,13 @@ static void __Pyx_CppExn2PyErr() {
}
}
#endif
/////////////// PythranConversion.proto ///////////////
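// Materialize a (possibly lazy) Pythran expression into the concrete value
// type selected by pythonic::returnable, then hand it to Pythran's
// to_python() to build the corresponding Python object.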
template <class T>
auto to_python_from_expr(T &&value) -> decltype(to_python(
typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type{std::forward<T>(value)}))
{
using returnable_type = typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type;
return to_python(returnable_type{std::forward<T>(value)});
}
......@@ -420,6 +420,19 @@
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
/////////////// CInitCode ///////////////
......@@ -662,6 +675,22 @@ static int __Pyx_check_binary_version(void) {
return 0;
}
/////////////// IsLittleEndian.proto ///////////////
static int __Pyx_Is_Little_Endian(void);
/////////////// IsLittleEndian ///////////////
static int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
/////////////// Refnanny.proto ///////////////
#ifndef CYTHON_REFNANNY
......
......@@ -23,6 +23,7 @@ Contents:
parallelism
debugging
numpy_tutorial
numpy_pythran
Indices and tables
------------------
......
.. highlight:: python
.. _numpy-pythran:
**************************
Pythran as a Numpy backend
**************************
Using the flag ``--np-pythran``, it is possible to use the `Pythran`_ numpy
implementation for numpy-related operations. One advantage of this backend is
that the Pythran implementation relies on C++ expression templates, which avoid
intermediate memory transfers and can take advantage of the SIMD instructions
of modern CPUs.
This can lead to significant speedups in some cases, roughly from 2x up to 16x,
depending on the targeted CPU architecture and the original algorithm.
Please note that this feature is experimental.
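A minimal command-line sketch (assuming the ``--np-pythran`` flag simply turns
on the ``np_pythran`` directive, which requires C++ mode):

.. code::

    $ cython --cplus --np-pythran hello_pythran.pyx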
Usage example with distutils
----------------------------
You first need to install Pythran. See its `documentation
<https://pythonhosted.org/pythran/MANUAL.html>`_ for more information.
Then, simply add a ``cython: np_pythran=True`` directive at the top of the
Python files that need to be compiled with Pythran numpy support.
Here is an example of a simple ``setup.py`` file using distutils:
.. code::

    from distutils.core import setup
    from Cython.Build import cythonize

    setup(
        name = "My hello app",
        ext_modules = cythonize('hello_pythran.pyx')
    )
Then, with the following header in ``hello_pythran.pyx``:
.. code::

    # cython: np_pythran=True
``hello_pythran.pyx`` will be compiled using Pythran numpy support.
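As an illustration, a file along these lines would then have its whole-array
Numpy expressions evaluated through Pythran's expression templates (the
function name and body are hypothetical, not part of this change):

.. code::

    # cython: np_pythran=True

    import numpy as np
    cimport numpy as np

    def laplacian(np.ndarray[double, ndim=2] u):
        # evaluated via Pythran instead of building Numpy temporaries
        return (u[2:, 1:-1] + u[:-2, 1:-1] + u[1:-1, 2:] + u[1:-1, :-2]
                - 4.0 * u[1:-1, 1:-1])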
Please note that Pythran can be further tweaked by adding settings to the
``$HOME/.pythranrc`` file. For instance, this can be used to enable `Boost.SIMD`_ support.
See the `Pythran user manual
<https://pythonhosted.org/pythran/MANUAL.html#customizing-your-pythranrc>`_ for
more information.
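For illustration only, such a tweak might look like the following; the option
names belong to Pythran, not to this change, so double-check them against the
linked manual:

.. code::

    ; ~/.pythranrc
    [compiler]
    cflags = -DUSE_BOOST_SIMD -march=native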
.. _Pythran: https://github.com/serge-sans-paille/pythran
.. _Boost.SIMD: https://github.com/NumScale/boost.simd
......@@ -487,7 +487,7 @@ class TestBuilder(object):
cleanup_workdir, cleanup_sharedlibs, cleanup_failures,
with_pyregr, cython_only, languages, test_bugs, fork, language_level,
test_determinism,
common_utility_dir):
common_utility_dir, pythran_dir=None):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
......@@ -504,6 +504,7 @@ class TestBuilder(object):
self.language_level = language_level
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
def build_suite(self):
suite = unittest.TestSuite()
......@@ -607,13 +608,14 @@ class TestBuilder(object):
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse)
expect_errors, expect_warnings, warning_errors, preparse,
self.pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list ]
return tests
def build_test(self, test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse):
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
......@@ -634,7 +636,8 @@ class TestBuilder(object):
language_level=self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir)
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir)
class CythonCompileTestCase(unittest.TestCase):
......@@ -643,7 +646,7 @@ class CythonCompileTestCase(unittest.TestCase):
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None):
common_utility_dir=None, pythran_dir=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
......@@ -663,10 +666,11 @@ class CythonCompileTestCase(unittest.TestCase):
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s) %s" % (self.language, self.name)
return "compiling (%s%s) %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def setUp(self):
from Cython.Compiler import Options
......@@ -846,6 +850,7 @@ class CythonCompileTestCase(unittest.TestCase):
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
......@@ -875,6 +880,9 @@ class CythonCompileTestCase(unittest.TestCase):
if extra_extension_args is None:
extra_extension_args = {}
if self.pythran_dir is not None:
ext_compile_flags.extend(['-I', self.pythran_dir, '-DENABLE_PYTHON_MODULE', '-std=c++11', '-D__PYTHRAN__=%d' % sys.version_info.major, '-Wno-cpp'])
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
......@@ -1044,7 +1052,7 @@ class CythonRunTestCase(CythonCompileTestCase):
if self.cython_only:
return CythonCompileTestCase.shortDescription(self)
else:
return "compiling (%s) and running %s" % (self.language, self.name)
return "compiling (%s%s) and running %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def run(self, result=None):
if result is None:
......@@ -1839,6 +1847,8 @@ def main():
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
options, cmd_args = parser.parse_args(args)
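A hedged sketch of driving Cython's test runner with the new option, deriving the include directory the same way the cythonize() hunk above derives pythran_include_dir:
PYTHRAN_DIR=$(python -c "import os, pythran; print(os.path.dirname(pythran.__file__))")
python runtests.py --pythran-dir="$PYTHRAN_DIR"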
......@@ -2095,7 +2105,7 @@ def runtests(options, cmd_args, coverage=None):
options.cython_only, languages, test_bugs,
options.fork, options.language_level,
options.test_determinism,
common_utility_dir)
common_utility_dir, options.pythran_dir)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
......@@ -2110,7 +2120,7 @@ def runtests(options, cmd_args, coverage=None):
options.cython_only, languages, test_bugs,
options.fork, sys.version_info[0],
options.test_determinism,
common_utility_dir)
common_utility_dir, options.pythran_dir)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
......