nexedi / cython

Commit 7ef9c72f, authored Mar 29, 2012 by Robert Bradshaw
Merge branch 'release'
Conflicts: tests/run/numpy_common.pxi
Parents: c711c46a, d093dcc0
Showing 15 changed files with 281 additions and 61 deletions.
Cython/Build/Dependencies.py            +4   -1
Cython/Compiler/Code.py                 +20  -0
Cython/Compiler/ExprNodes.py            +11  -7
Cython/Compiler/Nodes.py                +20  -0
Cython/Compiler/NumpySupport.py         +56  -0
Cython/Compiler/Optimize.py             +3   -0
Cython/Compiler/ParseTreeTransforms.py  +11  -1
Cython/Compiler/Pipeline.py             +1   -0
Cython/Includes/numpy.pxd               +54  -30
Cython/Utility/Buffer.c                 +4   -0
tests/run/numpy_attributes.pyx          +45  -0
tests/run/numpy_common.pxi              +9   -9
tests/run/numpy_memoryview.pyx          +12  -4
tests/run/numpy_test.pyx                +12  -9
tests/run/reversed_iteration.pyx        +19  -0
Cython/Build/Dependencies.py

@@ -269,7 +269,10 @@ def parse_dependencies(source_filename):
     if '\t' in source:
         source = source.replace('\t', ' ')
     # TODO: pure mode
-    dependancy = re.compile(r"(cimport +([0-9a-zA-Z_.]+)\b)|(from +([0-9a-zA-Z_.]+) +cimport)|(include +'([^']+)')|(cdef +extern +from +'([^']+)')")
+    dependancy = re.compile(r"(cimport +([0-9a-zA-Z_.]+)\b)|"
+                            "(from +([0-9a-zA-Z_.]+) +cimport)|"
+                            "(include +['\"]([^'\"]+)['\"])|"
+                            "(cdef +extern +from +['\"]([^'\"]+)['\"])")
     cimports = []
     includes = []
     externs  = []
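Note: the only behavioural change in this hunk is that include and cdef extern file names may now be double-quoted as well as single-quoted. A standalone sketch of what the widened pattern captures; the regex is copied from the added lines above, while the sample source string is made up for illustration:

import re

# Pattern copied from the added lines above; only the first fragment is a raw string.
dependancy = re.compile(r"(cimport +([0-9a-zA-Z_.]+)\b)|"
                        "(from +([0-9a-zA-Z_.]+) +cimport)|"
                        "(include +['\"]([^'\"]+)['\"])|"
                        "(cdef +extern +from +['\"]([^'\"]+)['\"])")

sample = """
cimport numpy
from libc.math cimport sqrt
include "common.pxi"
cdef extern from 'header.h':
    pass
"""

for match in dependancy.finditer(sample):
    # groups 2/4: cimported modules, group 6: include file, group 8: extern header
    print([g for g in match.groups() if g is not None])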
Cython/Compiler/Code.py

@@ -1903,6 +1903,26 @@ class CCodeWriter(object):
             self.putln(string)
         self.putln("#endif /* _OPENMP */")
 
+    def undef_builtin_expect(self, cond):
+        """
+        Redefine the macros likely() and unlikely to no-ops, depending on
+        condition 'cond'
+        """
+        self.putln("#if %s" % cond)
+        self.putln("    #undef likely")
+        self.putln("    #undef unlikely")
+        self.putln("    #define likely(x)   (x)")
+        self.putln("    #define unlikely(x) (x)")
+        self.putln("#endif")
+
+    def redef_builtin_expect(self, cond):
+        self.putln("#if %s" % cond)
+        self.putln("    #undef likely")
+        self.putln("    #undef unlikely")
+        self.putln("    #define likely(x)   __builtin_expect(!!(x), 1)")
+        self.putln("    #define unlikely(x) __builtin_expect(!!(x), 0)")
+        self.putln("#endif")
+
 class PyrexCodeWriter(object):
     # f       file     output file
     # level   int      indentation level
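Note: to see what the new writer methods actually emit, here is a minimal stand-in (not the real CCodeWriter) that reuses the same putln() sequence, fed with the condition string that ParallelStatNode assembles further down in this commit:

class FakeWriter(object):
    """Minimal stand-in for CCodeWriter: putln() just prints the line."""
    def putln(self, line):
        print(line)

    def undef_builtin_expect(self, cond):
        # Same sequence of putln() calls as the method added above.
        self.putln("#if %s" % cond)
        self.putln("    #undef likely")
        self.putln("    #undef unlikely")
        self.putln("    #define likely(x)   (x)")
        self.putln("    #define unlikely(x) (x)")
        self.putln("#endif")

buggy_platform = "(defined(__APPLE__) || defined(__OSX__))"
have_expect = ("(defined(__GNUC__) && "
               "(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))")

FakeWriter().undef_builtin_expect("(%s && %s)" % (buggy_platform, have_expect))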
Cython/Compiler/ExprNodes.py

@@ -1363,16 +1363,9 @@ class NameNode(AtomicExprNode):
     allow_null = False
     nogil = False
 
-    def create_analysed_rvalue(pos, env, entry):
-        node = NameNode(pos)
-        node.analyse_types(env, entry=entry)
-        return node
-
     def as_cython_attribute(self):
         return self.cython_attribute
 
-    create_analysed_rvalue = staticmethod(create_analysed_rvalue)
-
     def type_dependencies(self, env):
         if self.entry is None:
             self.entry = env.lookup(self.name)

@@ -4435,6 +4428,17 @@ class AttributeNode(ExprNode):
                 # method of an extension type, so we treat it like a Python
                 # attribute.
                 pass
+        # NumPy hack
+        if (obj_type.is_extension_type and
+                obj_type.objstruct_cname == 'PyArrayObject'):
+            from NumpySupport import numpy_transform_attribute_node
+            replacement_node = numpy_transform_attribute_node(self)
+            # Since we can't actually replace our node yet, we only grasp its
+            # type, and then the replacement happens in
+            # AnalyseExpresssionsTransform...
+            self.type = replacement_node.type
+            if replacement_node is not self:
+                return
         # If we get here, the base object is not a struct/union/extension
         # type, or it is an extension type and the attribute is either not
         # declared or is declared as a Python method. Treat it as a Python
Cython/Compiler/Nodes.py

@@ -7494,6 +7494,8 @@ class ParallelStatNode(StatNode, ParallelNode):
         self.begin_of_parallel_control_block_point = code.insertion_point()
         self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()
 
+        self.undef_builtin_expect_apple_gcc_bug(code)
+
     def begin_parallel_block(self, code):
         """
         Each OpenMP thread in a parallel section that contains a with gil block

@@ -7786,6 +7788,24 @@ class ParallelStatNode(StatNode, ParallelNode):
                 "}")
             # end if
 
         code.end_block()  # end parallel control flow block
+        self.redef_builtin_expect_apple_gcc_bug(code)
+
+    # FIXME: improve with version number for OS X Lion
+    buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
+    have_expect_condition = "(defined(__GNUC__) && " \
+                            "(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
+    redef_condition = "(%s && %s)" % (buggy_platform_macro_condition,
+                                      have_expect_condition)
+
+    def undef_builtin_expect_apple_gcc_bug(self, code):
+        """
+        A bug on OS X Lion disallows __builtin_expect macros. This code avoids them
+        """
+        if not self.parent:
+            code.undef_builtin_expect(self.redef_condition)
+
+    def redef_builtin_expect_apple_gcc_bug(self, code):
+        if not self.parent:
+            code.redef_builtin_expect(self.redef_condition)
+
 class ParallelWithBlockNode(ParallelStatNode):
Cython/Compiler/NumpySupport.py  (new file, mode 100644)

# The hacks that are specific for NumPy. These were introduced because
# the NumPy ABI changed so that the shape, ndim, strides, etc. fields were
# no longer available, however the use of these were so entrenched in
# Cython codes

import PyrexTypes
import ExprNodes
from StringEncoding import EncodedString

def numpy_transform_attribute_node(node):
    assert isinstance(node, ExprNodes.AttributeNode)

    if node.obj.type.objstruct_cname != 'PyArrayObject':
        return node

    pos = node.pos
    numpy_pxd_scope = node.obj.entry.type.scope.parent_scope

    def macro_call_node(numpy_macro_name):
        array_node = node.obj
        func_entry = numpy_pxd_scope.entries[numpy_macro_name]
        function_name_node = ExprNodes.NameNode(
            name=EncodedString(numpy_macro_name),
            pos=pos,
            entry=func_entry,
            is_called=1,
            type=func_entry.type,
            cf_maybe_null=False,
            cf_is_null=False)
        call_node = ExprNodes.SimpleCallNode(
            pos=pos,
            function=function_name_node,
            name=EncodedString(numpy_macro_name),
            args=[array_node],
            type=func_entry.type.return_type,
            analysed=True)
        return call_node

    if node.attribute == u'ndim':
        result = macro_call_node(u'PyArray_NDIM')
    elif node.attribute == u'data':
        call_node = macro_call_node(u'PyArray_DATA')
        cast_node = ExprNodes.TypecastNode(pos,
                                           type=PyrexTypes.c_char_ptr_type,
                                           operand=call_node)
        result = cast_node
    elif node.attribute == u'shape':
        result = macro_call_node(u'PyArray_DIMS')
    elif node.attribute == u'strides':
        result = macro_call_node(u'PyArray_STRIDES')
    else:
        result = node
    return result
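Note: the transform above only touches four ndarray attributes. As a hypothetical, much-simplified summary (the table and helper below are illustration only, not part of the compiler), the rewrite amounts to:

# Hypothetical summary of numpy_transform_attribute_node(): attribute name
# -> (accessor macro, optional C cast wrapped around the call).
NUMPY_ATTRIBUTE_REWRITES = {
    u'ndim':    (u'PyArray_NDIM',    None),
    u'data':    (u'PyArray_DATA',    u'char *'),
    u'shape':   (u'PyArray_DIMS',    None),
    u'strides': (u'PyArray_STRIDES', None),
}

def describe_rewrite(attribute):
    """Show how 'arr.<attribute>' ends up being compiled."""
    if attribute not in NUMPY_ATTRIBUTE_REWRITES:
        return "arr.%s  (left untouched)" % attribute
    macro, cast = NUMPY_ATTRIBUTE_REWRITES[attribute]
    call = "%s(arr)" % macro
    return "arr.%s  ->  %s" % (attribute,
                               ("(%s)%s" % (cast, call)) if cast else call)

for attr in (u'ndim', u'data', u'shape', u'strides', u'descr'):
    print(describe_rewrite(attr))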
Cython/Compiler/Optimize.py

@@ -563,6 +563,9 @@ class IterationTransform(Visitor.VisitorTransform):
         if step_value == 0:
             # will lead to an error elsewhere
             return node
+        if reversed and step_value not in (1, -1):
+            # FIXME: currently broken - requires calculation of the correct bounds
+            return node
         if not isinstance(step, ExprNodes.IntNode):
             step = ExprNodes.IntNode(step_pos, value=str(step_value),
                                      constant_result=step_value)
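Note: the new guard exists because, for a non-unit step, the last value produced by range(a, b, step) is not simply b - step, so the reversed loop's start bound would have to be computed. A small illustration in plain Python; the helper is an assumption of mine, limited to positive steps:

def last_value(a, b, step):
    """Last value produced by range(a, b, step) for a positive step, or None."""
    n = max(0, (b - a + step - 1) // step)   # number of iterations range() performs
    return a + (n - 1) * step if n else None

print(last_value(0, 10, 3))              # 9
print(list(reversed(range(0, 10, 3))))   # [9, 6, 3, 0] - starts at 9, not at 10 - 3 = 7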
Cython/Compiler/ParseTreeTransforms.py

@@ -1744,6 +1744,7 @@ if VALUE is not None:
 class AnalyseExpressionsTransform(CythonTransform):
+    # Also handles NumPy
 
     def visit_ModuleNode(self, node):
         self.env_stack = [node.scope]

@@ -1785,9 +1786,18 @@ class AnalyseExpressionsTransform(CythonTransform):
         elif node.memslice_ellipsis_noop:
             # memoryviewslice[...] expression, drop the IndexNode
             node = node.base
         return node
 
+    def visit_AttributeNode(self, node):
+        # Note: Expression analysis for attributes has already happened
+        # at this point (by recursive calls starting from FuncDefNode)
+        #print node.dump()
+        #return node
+        type = node.obj.type
+        if type.is_extension_type and type.objstruct_cname == 'PyArrayObject':
+            from NumpySupport import numpy_transform_attribute_node
+            node = numpy_transform_attribute_node(node)
+        return node
+
 class FindInvalidUseOfFusedTypes(CythonTransform):
Cython/Compiler/Pipeline.py

@@ -188,6 +188,7 @@ def create_pipeline(context, mode, exclude_classes=()):
         _check_c_declarations,
         InlineDefNodeCalls(context),
         AnalyseExpressionsTransform(context),
+        # AnalyseExpressionsTransform also contains the NumPy-specific support
         FindInvalidUseOfFusedTypes(context),
         CreateClosureClasses(context),  ## After all lookups and type inference
         ExpandInplaceOperators(context),
Cython/Includes/numpy.pxd

@@ -151,6 +151,9 @@ cdef extern from "numpy/arrayobject.h":
     ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
 
+    ctypedef struct PyArray_Descr:
+        pass
+
     ctypedef class numpy.dtype [object PyArray_Descr]:
         # Use PyDataType_* macros when possible, however there are no macros
         # for accessing some of the fields, so some are defined. Please

@@ -177,15 +180,11 @@ cdef extern from "numpy/arrayobject.h":
     ctypedef class numpy.ndarray [object PyArrayObject]:
         cdef __cythonbufferdefaults__ = {"mode": "strided"}
 
-        cdef:
-            # Only taking a few of the most commonly used and stable fields.
-            # One should use PyArray_* macros instead to access the C fields.
-            char *data
-            int ndim "nd"
-            npy_intp *shape "dimensions"
-            npy_intp *strides
-            dtype descr
-            PyObject* base
+        # Note: The fields are no longer defined, please use accessor
+        # functions. Cython special-cases/hacks the data, ndim, shape
+        # and stride attributes of the ndarray to use accessor
+        # functions for backwards compatability and convenience.
 
         # Note: This syntax (function definition in pxd files) is an
         # experimental exception made for __getbuffer__ and __releasebuffer__

@@ -236,7 +235,7 @@ cdef extern from "numpy/arrayobject.h":
             cdef int t
             cdef char* f = NULL
-            cdef dtype descr = self.descr
+            cdef dtype descr = get_array_dtype(self)
             cdef list stack
             cdef int offset

@@ -376,20 +375,29 @@ cdef extern from "numpy/arrayobject.h":
     bint PyArray_ISWRITEABLE(ndarray m)
     bint PyArray_ISALIGNED(ndarray m)
 
-    int PyArray_NDIM(ndarray)
+    int PyArray_NDIM(ndarray) nogil
     bint PyArray_ISONESEGMENT(ndarray)
    bint PyArray_ISFORTRAN(ndarray)
     int PyArray_FORTRANIF(ndarray)
 
-    void* PyArray_DATA(ndarray)
-    char* PyArray_BYTES(ndarray)
-    npy_intp* PyArray_DIMS(ndarray)
-    npy_intp* PyArray_STRIDES(ndarray)
-    npy_intp PyArray_DIM(ndarray, size_t)
-    npy_intp PyArray_STRIDE(ndarray, size_t)
+    void* PyArray_DATA(ndarray) nogil
+    char* PyArray_BYTES(ndarray) nogil
+    npy_intp* PyArray_DIMS(ndarray) nogil
+    npy_intp* PyArray_STRIDES(ndarray) nogil
+    npy_intp PyArray_DIM(ndarray, size_t) nogil
+    npy_intp PyArray_STRIDE(ndarray, size_t) nogil
 
+    # The two functions below return borrowed references and should
+    # be used with care; often you will want to use get_array_base
+    # or get_array_dtype (define below) instead from Cython.
+    PyObject* PyArray_BASE(ndarray)
+    # Cython API of the function below might change! PyArray_DESCR
+    # actually returns PyArray_Descr* == pointer-version of dtype,
+    # which appears to be difficult to declare properly in Cython;
+    # protect it with trailing underscore for now just to avoid having
+    # user code depend on it without reading this note.
+    PyArray_Descr* PyArray_DESCR_ "PyArray_DESCR"(ndarray)
-    # object PyArray_BASE(ndarray) wrong refcount semantics
-    # dtype PyArray_DESCR(ndarray) wrong refcount semantics
     int PyArray_FLAGS(ndarray)
     npy_intp PyArray_ITEMSIZE(ndarray)
     int PyArray_TYPE(ndarray arr)

@@ -961,18 +969,34 @@ cdef extern from "numpy/ufuncobject.h":
     void import_ufunc()
 
-cdef inline void set_array_base(ndarray arr, object base):
-    cdef PyObject* baseptr
-    if base is None:
-        baseptr = NULL
-    else:
-        Py_INCREF(base) # important to do this before decref below!
-        baseptr = <PyObject*>base
-    Py_XDECREF(arr.base)
-    arr.base = baseptr
+# The ability to set the base field of an ndarray seems to be
+# deprecated in NumPy 1.7 (no PyArray_SET_BASE seems to be
+# available). Remove this support and see who complains and how their
+# case could be fixed in 1.7...
+#
+#cdef inline void set_array_base(ndarray arr, object base):
+#    cdef PyObject* baseptr
+#    if base is None:
+#        baseptr = NULL
+#    else:
+#        Py_INCREF(base) # important to do this before decref below!
+#        baseptr = <PyObject*>base
+#    Py_XDECREF(arr.base)
+#    arr.base = baseptr
 
 cdef inline object get_array_base(ndarray arr):
-    if arr.base is NULL:
-        return None
-    else:
-        return <object>arr.base
+    cdef PyObject* pobj = PyArray_BASE(arr)
+    if pobj != NULL:
+        obj = <object>pobj
+        Py_INCREF(obj)
+        return obj
+    else:
+        return None
+
+cdef inline dtype get_array_dtype(ndarray arr):
+    if PyArray_DESCR_(arr) != NULL:
+        obj = <object>PyArray_DESCR_(arr)
+        Py_INCREF(obj)
+        return obj
+    else:
+        return None
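Note: as a quick sanity check of what the new get_array_base() helper is meant to report, the same information is visible from pure Python through ndarray.base (assuming a working NumPy install; this snippet is not part of the diff):

import numpy as np

a = np.arange(6)
v = a.reshape(2, 3)     # a view into 'a'

print(a.base is None)   # True: 'a' owns its data, so get_array_base() would return None
print(v.base is a)      # True: the view's base is 'a'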
Cython/Utility/Buffer.c

@@ -134,6 +134,8 @@ static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
        goto fail;
 
       return func(obj, view, flags);
+    } else {
+      PyErr_Clear();
     }
 #endif

@@ -182,6 +184,8 @@ static void __Pyx_ReleaseBuffer(Py_buffer *view) {
       func(obj, view);
       return;
+    } else {
+      PyErr_Clear();
     }
 #endif
tests/run/numpy_attributes.pyx  (new file, mode 100644)

# tag: numpy

import numpy as np
cimport numpy as np

def f():
    """
    >>> f()
    ndim 2
    data 1
    shape 3 2
    shape[1] 2
    strides 16 8
    """
    cdef np.ndarray x = np.ones((3, 2), dtype=np.int64)
    cdef int i
    cdef Py_ssize_t j, k
    cdef char *p
    # todo: int * p: 23:13: Cannot assign type 'char *' to 'int *'
    with nogil:
        i = x.ndim
    print 'ndim', i
    with nogil:
        p = x.data
    print 'data', (<np.int64_t*>p)[0]
    with nogil:
        j = x.shape[0]
        k = x.shape[1]
    print 'shape', j, k
    # Check that non-typical uses still work
    cdef np.npy_intp *shape
    with nogil:
        shape = x.shape + 1
    print 'shape[1]', shape[0]
    with nogil:
        j = x.strides[0]
        k = x.strides[1]
    print 'strides', j, k
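Note: the expected doctest output can be reproduced with plain NumPy, which shows where the numbers come from: a C-contiguous (3, 2) int64 array has an 8-byte itemsize, so the row stride is 2 * 8 = 16 bytes (illustrative snippet, not part of the test):

import numpy as np

x = np.ones((3, 2), dtype=np.int64)
print(x.ndim)      # 2
print(x.shape)     # (3, 2)
print(x.strides)   # (16, 8)  -> the 'strides 16 8' line in the docstring above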
tests/run/numpy_common.pxi

 # This file is to avoid "unused function" warnings.
+# (disabled) hack to avoid C compiler warnings about unused functions in the NumPy header files
-cdef extern from *:
-    bint FALSE "0"
-    void import_array()
-    void import_umath()
-
-if FALSE:
-    import_array()
-    import_umath()
+## cdef extern from *:
+##     bint FALSE "0"
+##     void import_array()
+##     void import_umath1(void* ret)
+##
+## if FALSE:
+##     import_array()
+##     import_umath1(NULL)
tests/run/numpy_memoryview.pyx

@@ -5,6 +5,8 @@
 Test slicing for memoryviews and memoryviewslices
 """
 
+import sys
+
 cimport numpy as np
 import numpy as np
 
 cimport cython

@@ -422,9 +424,14 @@ cdef packed struct StructArray:
 @testcase_numpy_1_5
 def test_memslice_structarray(data, dtype):
     """
-    >>> data = [(range(4), 'spam\\0'), (range(4, 8), 'ham\\0\\0'), (range(8, 12), 'eggs\\0')]
+    >>> def b(s): return s.encode('ascii')
+    >>> def to_byte_values(b):
+    ...     if sys.version_info[0] >= 3: return list(b)
+    ...     else: return map(ord, b)
+    >>> data = [(range(4), b('spam\\0')), (range(4, 8), b('ham\\0\\0')), (range(8, 12), b('eggs\\0'))]
     >>> dtype = np.dtype([('a', '4i'), ('b', '5b')])
-    >>> test_memslice_structarray([(L, map(ord, s)) for L, s in data], dtype)
+    >>> test_memslice_structarray([(L, to_byte_values(s)) for L, s in data], dtype)
     0
     1
     2

@@ -468,7 +475,7 @@ def test_memslice_structarray(data, dtype):
     for i in range(3):
         for j in range(4):
             print myslice[i].a[j]
-        print myslice[i].b
+        print myslice[i].b.decode('ASCII')
 
 @testcase_numpy_1_5
 def test_structarray_errors(StructArray[:] a):

@@ -520,8 +527,9 @@ def stringtest(String[:] view):
 @testcase_numpy_1_5
 def test_string_invalid_dims():
     """
+    >>> def b(s): return s.encode('ascii')
     >>> dtype = np.dtype([('a', 'S4')])
-    >>> data = ['spam', 'eggs']
+    >>> data = [b('spam'), b('eggs')]
     >>> stringstructtest(np.array(data, dtype=dtype))
     Traceback (most recent call last):
        ...
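Note: the new to_byte_values() doctest helper exists because iterating a bytes object yields ints on Python 3 but 1-character strings on Python 2. A standalone check of the same helper as in the doctest above:

import sys

def to_byte_values(b):
    # On Python 3, iterating bytes already yields ints; on Python 2 we need ord().
    if sys.version_info[0] >= 3:
        return list(b)
    else:
        return map(ord, b)

print(to_byte_values(b'spam\x00'))   # [115, 112, 97, 109, 0]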
tests/run/numpy_test.pyx

@@ -208,26 +208,29 @@ try:
     >>> test_point_record()
     array([(0.0, 0.0), (1.0, -1.0), (2.0, -2.0)],
           dtype=[('x', '!f8'), ('y', '!f8')])
 """
 
-    if np.__version__ >= '1.6':
+    if np.__version__ >= '1.6' and False:
         __doc__ += u"""
+        Tests are DISABLED as the buffer format parser does not align members
+        of aligned structs in padded structs in relation to the possibly
+        unaligned initial offset.
+
         The following expose bugs in Numpy (versions prior to 2011-04-02):
         >>> print(test_partially_packed_align(np.zeros((1,), dtype=np.dtype([('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True))))
-        array([(22, 23, (24, 25), 26)],
+        array([(22, 23, (24, 25), 26)],
               dtype=[('a', '|i1'), ('', '|V3'), ('b', '!i4'), ('sub', [('f0', '|i1'), ('f1', '!i4')]), ('', '|V3'), ('c', '!i4')])
         >>> print(test_partially_packed_align_2(np.zeros((1,), dtype=np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b'), ('sub', np.dtype('b,i', align=True))]))))
-        array([(22, 23, 24, (27, 28))],
+        array([(22, 23, 24, (27, 28))],
              dtype=[('a', '|i1'), ('b', '!i4'), ('c', '|i1'), ('sub', [('f0', '|i1'), ('', '|V3'), ('f1', '!i4')])])
         >>> print(test_partially_packed_align(np.zeros((1,), dtype=np.dtype([('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=False)))) #doctest: +ELLIPSIS
         Traceback (most recent call last):
            ...
         ValueError: ...
         >>> print(test_partially_packed_align_2(np.zeros((1,), dtype=np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b'), ('sub', np.dtype('b,i', align=False))])))) #doctest: +ELLIPSIS
         Traceback (most recent call last):
            ...

@@ -257,7 +260,7 @@ def ndarray_str(arr):
     Since Py2.3 doctest don't support <BLANKLINE>, manually replace blank lines
     with <_BLANKLINE_>
     """
-    return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
+    return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
 
 def basic():
     cdef object[int, ndim=2] buf = np.arange(10, dtype='i').reshape((2, 5))
tests/run/reversed_iteration.pyx

@@ -133,6 +133,25 @@ def reversed_range_step_neg(int a, int b):
         result.append(i)
     return result, i
 
+#@cython.test_assert_path_exists('//ForFromStatNode')
+def reversed_range_step3(int a, int b):
+    """
+    >>> [ i for i in _reversed(range(0, 5, 3)) ]
+    [3, 0]
+    >>> reversed_range_step3(0, 5)
+    ([3, 0], 0)
+
+    >>> [ i for i in _reversed(range(5, 0, 3)) ]
+    []
+    >>> reversed_range_step3(5, 0)
+    ([], 99)
+    """
+    cdef int i = 99
+    result = []
+    for i in reversed(range(a, b, 3)):
+        result.append(i)
+    return result, i
+
 unicode_string = u"abcDEF"
 
 @cython.test_assert_path_exists('//ForFromStatNode')
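Note: the doctest values in the new test follow directly from CPython's own reversed/range semantics, which the optimized Cython loop has to match; the check below runs in plain Python:

print(list(reversed(range(0, 5, 3))))   # [3, 0]  -> reversed_range_step3(0, 5) returns ([3, 0], 0)
print(list(reversed(range(5, 0, 3))))   # []      -> empty loop, so i keeps its initial value 99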