Commit 63e4d374 authored by Mark Florisson

Merge branch 'release'

Conflicts:
	Cython/Compiler/ParseTreeTransforms.py
parents f1744511 f4347894
@@ -34,8 +34,6 @@ import Symtab
import Options
from Cython import Utils
from Annotate import AnnotationItem
from NumpySupport import numpy_transform_attribute_node, \
should_apply_numpy_hack
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
@@ -4460,16 +4458,6 @@ class AttributeNode(ExprNode):
# method of an extension type, so we treat it like a Python
# attribute.
pass
# NumPy hack
if (getattr(self.obj, 'type', None) and obj_type.is_extension_type
and should_apply_numpy_hack(obj_type)):
replacement_node = numpy_transform_attribute_node(self)
# Since we can't actually replace our node yet, we only grasp its
# type, and then the replacement happens in
# AnalyseExpresssionsTransform...
self.type = replacement_node.type
if replacement_node is not self:
return
# If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
...
@@ -24,10 +24,6 @@ module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_
verbose = 0
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# (For now, this is only)
@@ -74,6 +70,8 @@ class Context(object):
self.pxds = {} # full name -> node tree
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
self.include_directories = include_directories + [standard_include_path]
self.set_language_level(language_level)
...
# The hacks that are specific for NumPy. These were introduced because
# the NumPy ABI changed so that the shape, ndim, strides, etc. fields were
# no longer available, however the use of these were so entrenched in
# Cython codes
import os
from StringEncoding import EncodedString
def should_apply_numpy_hack(obj_type):
if not obj_type.is_extension_type or obj_type.objstruct_cname != 'PyArrayObject':
return False
from Scanning import FileSourceDescriptor
from Main import standard_include_path
type_source = obj_type.pos[0]
if isinstance(type_source, FileSourceDescriptor):
type_source_path = os.path.abspath(os.path.normpath(type_source.filename))
return type_source_path == os.path.join(standard_include_path, 'numpy.pxd')
else:
return False
def numpy_transform_attribute_node(node):
import PyrexTypes
import ExprNodes
assert isinstance(node, ExprNodes.AttributeNode)
if node.obj.type.objstruct_cname != 'PyArrayObject':
return node
pos = node.pos
numpy_pxd_scope = node.obj.type.scope.parent_scope
def macro_call_node(numpy_macro_name):
array_node = node.obj
func_entry = numpy_pxd_scope.entries[numpy_macro_name]
function_name_node = ExprNodes.NameNode(
name=EncodedString(numpy_macro_name),
pos=pos,
entry=func_entry,
is_called=1,
type=func_entry.type,
cf_maybe_null=False,
cf_is_null=False)
call_node = ExprNodes.SimpleCallNode(
pos=pos,
function=function_name_node,
name=EncodedString(numpy_macro_name),
args=[array_node],
type=func_entry.type.return_type,
analysed=True)
return call_node
if node.attribute == u'ndim':
result = macro_call_node(u'PyArray_NDIM')
elif node.attribute == u'data':
call_node = macro_call_node(u'PyArray_DATA')
cast_node = ExprNodes.TypecastNode(pos,
type=PyrexTypes.c_char_ptr_type,
operand=call_node)
result = cast_node
elif node.attribute == u'shape':
result = macro_call_node(u'PyArray_DIMS')
elif node.attribute == u'strides':
result = macro_call_node(u'PyArray_STRIDES')
else:
result = node
return result
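# Illustration only, not part of this commit: at the Cython-source level, the
# transform above rewrote ndarray attribute access into the PyArray_* macro
# calls declared in numpy.pxd. A hand-written equivalent, with the function
# and variable names being assumptions made for this example:
cimport numpy as cnp
cnp.import_array()

def describe(cnp.ndarray arr):
    cdef int nd = cnp.PyArray_NDIM(arr)               # what arr.ndim became
    cdef char *buf = <char *> cnp.PyArray_DATA(arr)   # what arr.data became
    cdef cnp.npy_intp *dims = cnp.PyArray_DIMS(arr)   # what arr.shape became
    return nd, dims[0]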
@@ -18,8 +18,7 @@ from Cython.Compiler.TreeFragment import TreeFragment
from Cython.Compiler.StringEncoding import EncodedString
from Cython.Compiler.Errors import error, warning, CompileError, InternalError
from Cython.Compiler.Code import UtilityCode
from Cython.Compiler.NumpySupport import (should_apply_numpy_hack,
numpy_transform_attribute_node)
import copy
@@ -1748,7 +1747,6 @@ if VALUE is not None:
class AnalyseExpressionsTransform(CythonTransform):
# Also handles NumPy
def visit_ModuleNode(self, node):
self.env_stack = [node.scope]
@@ -1790,19 +1788,10 @@ class AnalyseExpressionsTransform(CythonTransform):
elif node.memslice_ellipsis_noop:
# memoryviewslice[...] expression, drop the IndexNode
node = node.base
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
type = node.obj.type
if (not node.type.is_error and type.is_extension_type and
should_apply_numpy_hack(type)):
node = numpy_transform_attribute_node(node)
self.visitchildren(node)
return node
class FindInvalidUseOfFusedTypes(CythonTransform):
def visit_FuncDefNode(self, node):
...
@@ -188,7 +188,6 @@ def create_pipeline(context, mode, exclude_classes=()):
_check_c_declarations,
InlineDefNodeCalls(context),
AnalyseExpressionsTransform(context),
# AnalyseExpressionsTransform also contains the NumPy-specific support
FindInvalidUseOfFusedTypes(context),
CreateClosureClasses(context), ## After all lookups and type inference
ExpandInplaceOperators(context),
...
@@ -151,9 +151,6 @@ cdef extern from "numpy/arrayobject.h":
ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
ctypedef struct PyArray_Descr:
pass
ctypedef class numpy.dtype [object PyArray_Descr]:
# Use PyDataType_* macros when possible, however there are no macros
# for accessing some of the fields, so some are defined. Please
@@ -180,11 +177,15 @@ cdef extern from "numpy/arrayobject.h":
ctypedef class numpy.ndarray [object PyArrayObject]:
cdef __cythonbufferdefaults__ = {"mode": "strided"}
-# Note: The fields are no longer defined, please use accessor
-# functions. Cython special-cases/hacks the data, ndim, shape
-# and stride attributes of the ndarray to use accessor
-# functions for backwards compatability and convenience.
+cdef:
+    # Only taking a few of the most commonly used and stable fields.
+    # One should use PyArray_* macros instead to access the C fields.
+    char *data
+    int ndim "nd"
+    npy_intp *shape "dimensions"
+    npy_intp *strides
+    dtype descr
+    PyObject* base
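# Illustration only, not part of this commit: the quoted strings above are
# Cython C-name aliases; `int ndim "nd"` exposes the PyArrayObject C member
# `nd` under the attribute name `ndim`, and `shape` maps to the `dimensions`
# member. Typed user code such as
#
#     cdef Py_ssize_t n = some_ndarray.shape[0]   # reads ...->dimensions[0] directly
#
# (variable names hypothetical) therefore touches the C struct without a
# Python-level attribute lookup.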
# Note: This syntax (function definition in pxd files) is an
# experimental exception made for __getbuffer__ and __releasebuffer__
@@ -235,7 +236,7 @@ cdef extern from "numpy/arrayobject.h":
cdef int t
cdef char* f = NULL
-cdef dtype descr = get_array_dtype(self)
+cdef dtype descr = self.descr
cdef list stack
cdef int offset
@@ -375,29 +376,20 @@ cdef extern from "numpy/arrayobject.h":
bint PyArray_ISWRITEABLE(ndarray m)
bint PyArray_ISALIGNED(ndarray m)
-int PyArray_NDIM(ndarray) nogil
+int PyArray_NDIM(ndarray)
bint PyArray_ISONESEGMENT(ndarray)
bint PyArray_ISFORTRAN(ndarray)
int PyArray_FORTRANIF(ndarray)
-void* PyArray_DATA(ndarray) nogil
-char* PyArray_BYTES(ndarray) nogil
-npy_intp* PyArray_DIMS(ndarray) nogil
-npy_intp* PyArray_STRIDES(ndarray) nogil
-npy_intp PyArray_DIM(ndarray, size_t) nogil
-npy_intp PyArray_STRIDE(ndarray, size_t) nogil
+void* PyArray_DATA(ndarray)
+char* PyArray_BYTES(ndarray)
+npy_intp* PyArray_DIMS(ndarray)
+npy_intp* PyArray_STRIDES(ndarray)
+npy_intp PyArray_DIM(ndarray, size_t)
+npy_intp PyArray_STRIDE(ndarray, size_t)
# The two functions below return borrowed references and should
# be used with care; often you will want to use get_array_base
# or get_array_dtype (define below) instead from Cython.
PyObject* PyArray_BASE(ndarray)
# Cython API of the function below might change! PyArray_DESCR
# actually returns PyArray_Descr* == pointer-version of dtype,
# which appears to be difficult to declare properly in Cython;
# protect it with trailing underscore for now just to avoid having
# user code depend on it without reading this note.
PyArray_Descr * PyArray_DESCR_ "PyArray_DESCR"(ndarray)
# object PyArray_BASE(ndarray) wrong refcount semantics
# dtype PyArray_DESCR(ndarray) wrong refcount semantics
int PyArray_FLAGS(ndarray)
npy_intp PyArray_ITEMSIZE(ndarray)
int PyArray_TYPE(ndarray arr)
@@ -969,34 +961,18 @@ cdef extern from "numpy/ufuncobject.h":
void import_ufunc()
-# The ability to set the base field of an ndarray seems to be
-# deprecated in NumPy 1.7 (no PyArray_SET_BASE seems to be
-# available). Remove this support and see who complains and how their
-# case could be fixed in 1.7...
-#
-#cdef inline void set_array_base(ndarray arr, object base):
-#    cdef PyObject* baseptr
-#    if base is None:
-#        baseptr = NULL
-#    else:
-#        Py_INCREF(base) # important to do this before decref below!
-#        baseptr = <PyObject*>base
-#    Py_XDECREF(arr.base)
-#    arr.base = baseptr
+cdef inline void set_array_base(ndarray arr, object base):
+    cdef PyObject* baseptr
+    if base is None:
+        baseptr = NULL
+    else:
+        Py_INCREF(base) # important to do this before decref below!
+        baseptr = <PyObject*>base
+    Py_XDECREF(arr.base)
+    arr.base = baseptr
cdef inline object get_array_base(ndarray arr):
-    cdef PyObject *pobj = PyArray_BASE(arr)
-    if pobj != NULL:
-        obj = <object>pobj
-        Py_INCREF(obj)
-        return obj
-    else:
-        return None
-cdef inline dtype get_array_dtype(ndarray arr):
-    if PyArray_DESCR_(arr) != NULL:
-        obj = <object>PyArray_DESCR_(arr)
-        Py_INCREF(obj)
-        return obj
-    else:
-        return None
+    if arr.base is NULL:
+        return None
+    else:
+        return <object>arr.base
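# Illustration only, not part of this commit: how user code typically pairs the
# two helpers above when exposing an external buffer as an ndarray. The wrapper
# name and the uint8 element type are assumptions made for this example;
# PyArray_SimpleNewFromData and NPY_UINT8 are declared elsewhere in numpy.pxd.
cimport numpy as cnp
cnp.import_array()

cdef cnp.ndarray wrap_buffer(object owner, void *buf, cnp.npy_intp n):
    # Build a 1-D uint8 view over memory that `owner` keeps alive...
    cdef cnp.ndarray arr = cnp.PyArray_SimpleNewFromData(1, &n, cnp.NPY_UINT8, buf)
    # ...and record the owner so it is not freed while the view exists.
    cnp.set_array_base(arr, owner)
    assert cnp.get_array_base(arr) is owner
    return arr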
__version__ = "0.16rc1" __version__ = "0.16rc2"
# Void cython.* directives (for case insensitive operating systems). # Void cython.* directives (for case insensitive operating systems).
from Cython.Shadow import * from Cython.Shadow import *
# tag: numpy
import numpy as np
cimport numpy as np
int64_array = np.ones((3, 2), dtype=np.int64)
def f():
"""
>>> f()
ndim 2
data 1
shape 3 2
shape[1] 2
strides 16 8
"""
cdef np.ndarray x = int64_array
cdef int i
cdef Py_ssize_t j, k
cdef char *p
# todo: int * p: 23:13: Cannot assign type 'char *' to 'int *'
with nogil:
i = x.ndim
print 'ndim', i
with nogil:
p = x.data
print 'data', (<np.int64_t*>p)[0]
with nogil:
j = x.shape[0]
k = x.shape[1]
print 'shape', j, k
# Check that non-typical uses still work
cdef np.npy_intp *shape
with nogil:
shape = x.shape + 1
print 'shape[1]', shape[0]
with nogil:
j = x.strides[0]
k = x.strides[1]
print 'strides', j, k
def test_non_namenode_attribute_access(obj):
"""
>>> test_non_namenode_attribute_access(int64_array)
data 1
"""
# Try casting, resulting in an AttributeNode with a TypeCastNode as object
# and 'data' as attribute
print "data", (<np.int64_t *> (<np.ndarray> obj).data)[0]
@@ -221,7 +221,7 @@ class NestedWith(unittest.TestCase):
def testEnterReturnsTuple(self):
with Dummy(value=(1,2)) as (a1, a2), \
Dummy(value=(10, 20)) as (b1, b2):
-self.assertEquals(1, a1)
-self.assertEquals(2, a2)
-self.assertEquals(10, b1)
-self.assertEquals(20, b2)
+self.assertEqual(1, a1)
+self.assertEqual(2, a2)
+self.assertEqual(10, b1)
+self.assertEqual(20, b2)
@@ -288,7 +288,7 @@ class NestedWith(unittest.TestCase):
def testEnterReturnsTuple(self):
with Dummy(value=(1,2)) as (a1, a2), \
Dummy(value=(10, 20)) as (b1, b2):
-self.assertEquals(1, a1)
-self.assertEquals(2, a2)
-self.assertEquals(10, b1)
-self.assertEquals(20, b2)
+self.assertEqual(1, a1)
+self.assertEqual(2, a2)
+self.assertEqual(10, b1)
+self.assertEqual(20, b2)