Commit 480c9784 authored by Robert Bradshaw

Merge branch 'formal grammar'

parents 5bdca351 60676a6c
......@@ -304,6 +304,13 @@ class Context(object):
s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
scope = scope, context = self)
tree = Parsing.p_module(s, pxd, full_module_name)
if self.options.formal_grammar:
try:
from ..Parser import ConcreteSyntaxTree
except ImportError:
raise RuntimeError(
"Formal grammer can only be used with compiled Cython with an available pgen.")
ConcreteSyntaxTree.p_module(source_filename)
finally:
f.close()
except UnicodeDecodeError, e:
......@@ -475,6 +482,7 @@ class CompilationOptions(object):
compiler_directives dict Overrides for pragma options (see Options.py)
evaluate_tree_assertions boolean Test support: evaluate parse tree assertions
language_level integer The Python language level: 2 or 3
formal_grammar boolean Parse the file with the formal grammar
cplus boolean Compile as c++ code
"""
......@@ -507,6 +515,8 @@ class CompilationOptions(object):
options['compiler_directives'] = directives
if 'language_level' in directives and 'language_level' not in kw:
options['language_level'] = int(directives['language_level'])
if 'formal_grammar' in directives and 'formal_grammar' not in kw:
options['formal_grammar'] = directives['formal_grammar']
if 'cache' in options:
if options['cache'] is True:
options['cache'] = os.path.expanduser("~/.cycache")
......@@ -685,6 +695,7 @@ default_options = dict(
relative_path_in_code_position_comments = True,
c_line_in_traceback = True,
language_level = 2,
formal_grammar = False,
gdb_debug = False,
compile_time_env = None,
common_utility_include_dir = None,
......
......@@ -148,6 +148,8 @@ directive_defaults = {
# experimental, subject to change
'binding': None,
'freelist': 0,
'formal_grammar': False,
}
# Extra warning directives
......
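# Cython/Parser/ConcreteSyntaxTree.pyx (added by this commit).  The extern
# blocks below bind CPython's internal pgen-based parser: the parser tables
# generated from the Grammar file (graminit.c), the concrete-syntax-tree
# node API (node.h), and the string-parsing entry point (parsetok.h).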
cdef extern from "graminit.c":
ctypedef struct grammar:
pass
cdef grammar _PyParser_Grammar
cdef int Py_file_input
cdef extern from "node.h":
ctypedef struct node
void PyNode_Free(node* n)
int NCH(node* n)
node* CHILD(node* n, int ix)
node* RCHILD(node* n, int ix)
short TYPE(node* n)
char* STR(node* n)
cdef extern from "parsetok.h":
ctypedef struct perrdetail:
pass
cdef void PyParser_SetError(perrdetail *err) except *
cdef node * PyParser_ParseStringFlagsFilenameEx(
const char * s,
const char * filename,
grammar * g,
int start,
perrdetail * err_ret,
int * flags)
import distutils.sysconfig
import os
import re
def extract_names(path):
    # All parse tree types are #defined in these files as ints.
    type_names = {}
    for line in open(path):
        if line.startswith('#define'):
            try:
                _, name, value = line.strip().split()
                type_names[int(value)] = name
            except:
                # Not a simple "#define NAME <integer>" line; skip it.
                pass
    return type_names
cdef dict type_names = {}

cdef print_tree(node* n, indent=""):
    if not type_names:
        type_names.update(extract_names(
            os.path.join(distutils.sysconfig.get_python_inc(), 'token.h')))
        type_names.update(extract_names(
            os.path.join(os.path.dirname(__file__), 'graminit.h')))
    print indent, type_names.get(TYPE(n), 'unknown'), <object>STR(n) if NCH(n) == 0 else NCH(n)
    indent += " "
    for i in range(NCH(n)):
        print_tree(CHILD(n, i), indent)
def handle_includes(source, path):
    # TODO: Use include directory.
    def include_here(include_line):
        included = os.path.join(os.path.dirname(path), include_line.group(1)[1:-1])
        if not os.path.exists(included):
            return include_line.group(0) + ' # no such path: ' + included
        return handle_includes(open(included).read(), path)
    # TODO: Proper string tokenizing.
    return re.sub(r'^include\s+([^\n]+[\'"])\s*(#.*)?$', include_here, source, flags=re.M)
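# Illustration (hypothetical file name): a line such as
#     include "types.pxi"
# is replaced by the contents of types.pxi found next to the file being parsed,
# so the pgen-based parser only ever sees a single flat source.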
def p_module(path):
    cdef perrdetail err
    cdef int flags
    cdef node* n
    source = open(path).read()
    if '\ninclude ' in source:
        # TODO: Tokenizer needs to understand includes.
        source = handle_includes(source, path)
        path = "preparse(%s)" % path
    n = PyParser_ParseStringFlagsFilenameEx(
        source,
        path,
        &_PyParser_Grammar,
        Py_file_input,
        &err,
        &flags)
    if n:
        # print_tree(n)
        PyNode_Free(n)
    else:
        PyParser_SetError(&err)
# Grammar for Cython, based on the Grammar for Python 3
# Note: This grammar is not yet used by the Cython parser and is subject to change.
# Start symbols for the grammar:
# single_input is a single interactive statement;
# file_input is a module or sequence of commands read from an input file;
# eval_input is the input for the eval() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
file_input: (NEWLINE | stmt)* ENDMARKER
eval_input: testlist NEWLINE* ENDMARKER
decorator: '@' dotted_PY_NAME [ '(' [arglist] ')' ] NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | cdef_stmt)
funcdef: 'def' PY_NAME parameters ['->' test] ':' suite
parameters: '(' [typedargslist] ')'
typedargslist: (tfpdef ['=' (test | '*')] (',' tfpdef ['=' (test | '*')])* [','
['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]]
| '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef) [',' ellipsis]
tfpdef: maybe_typed_name [('not' | 'or') 'None'] [':' test]
varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]]
| '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
vfpdef: maybe_typed_name ['not' 'None']
stmt: simple_stmt | compound_stmt | cdef_stmt | ctypedef_stmt | DEF_stmt | IF_stmt
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
import_stmt | global_stmt | nonlocal_stmt | assert_stmt | print_stmt)
expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
print_stmt: 'print' ( [ test (',' test)* [','] ] |
'>>' test [ (',' test)+ [','] ] )
# For normal assignments, additional restrictions enforced by the interpreter
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test]]
# raise_stmt: 'raise' [test [',' test [',' test]]]
import_stmt: import_PY_NAME | import_from
import_PY_NAME: ('import' | 'cimport') dotted_as_PY_NAMEs
# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
import_from: ('from' (('.' | '...')* dotted_PY_NAME | ('.' | '...')+)
('import' | 'cimport') ('*' | '(' import_as_PY_NAMEs ')' | import_as_PY_NAMEs))
import_as_PY_NAME: PY_NAME ['as' PY_NAME]
dotted_as_PY_NAME: dotted_PY_NAME ['as' PY_NAME]
import_as_PY_NAMEs: import_as_PY_NAME (',' import_as_PY_NAME)* [',']
dotted_as_PY_NAMEs: dotted_as_PY_NAME (',' dotted_as_PY_NAME)*
dotted_PY_NAME: PY_NAME ('.' PY_NAME)*
global_stmt: 'global' PY_NAME (',' PY_NAME)*
nonlocal_stmt: 'nonlocal' PY_NAME (',' PY_NAME)*
exec_stmt: 'exec' expr ['in' test [',' test]]
assert_stmt: 'assert' test [',' test]
compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
while_stmt: 'while' test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist ('in' testlist | for_from_clause) ':' suite ['else' ':' suite]
for_from_clause: 'from' expr comp_op PY_NAME comp_op expr ['by' expr]
try_stmt: ('try' ':' suite
((except_clause ':' suite)+
['else' ':' suite]
['finally' ':' suite] |
'finally' ':' suite))
with_stmt: 'with' with_item (',' with_item)* ':' suite
with_item: test ['as' expr]
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test [('as' | ',') test]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
test: or_test ['if' or_test 'else' test] | lambdef
test_nocond: or_test | lambdef_nocond
lambdef: 'lambda' [varargslist] ':' test
lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
# <> isn't actually a valid comparison operator in Python. It's here for the
# sake of a __future__ import described in PEP 401
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power | address | size_of | cast
power: atom trailer* ['**' factor]
atom: ('(' [yield_expr|testlist_comp] ')' |
'[' [testlist_comp] ']' |
'{' [dictorsetmaker] '}' |
new_expr |
PY_NAME | NUMBER | STRING+ | ellipsis | 'None' | 'True' | 'False')
testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' (PY_NAME | 'sizeof')
subscriptlist: subscript (',' subscript)* [',']
subscript: test | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
(test (comp_for | (',' test)* [','])) )
classdef: 'class' PY_NAME ['(' [arglist] ')'] ':' suite
arglist: (argument ',')* (argument [',']
|'*' test (',' argument)* [',' '**' test]
|'**' test)
# The reason that keywords are test nodes instead of NAME is that using NAME
# results in an ambiguity. ast.c makes sure it's a NAME.
argument: test [comp_for] | test '=' test # Really [keyword '='] test
comp_iter: comp_for | comp_if
comp_for: 'for' exprlist ('in' or_test | for_from_clause) [comp_iter]
comp_if: 'if' test_nocond [comp_iter]
# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME
yield_expr: 'yield' [yield_arg]
yield_arg: 'from' test | testlist
# Cython extensions
# Accommodate the Py2 tokenizer, which has no ELLIPSIS token.
ellipsis: '...' | '.' '.' '.'
signedness: 'unsigned' | 'signed'
longness: 'char' | 'short' | 'long' | 'long' 'long'
# TODO: [unsigned] double doesn't make sense, but we need long double
int_type: signedness [longness] | longness | [signedness] [longness] ('int' | 'double') | 'complex'
type: ['const'] (NAME ('.' PY_NAME)* | int_type | '(' type ')') ['complex'] [type_qualifiers]
maybe_typed_name: ['const'] (NAME [('.' PY_NAME)* ['complex'] [type_qualifiers] NAME] | (int_type | '(' type ')') ['complex'] [type_qualifiers] NAME)
template_params: '[' NAME (',' NAME)* ']'
type_qualifiers: type_qualifier+
type_qualifier: '*' | '**' | '&' | type_index ('.' NAME [type_index])*
# TODO: old buffer syntax
type_index: '[' [(NUMBER | type (',' type)* | (memory_view_index (',' memory_view_index)*))] ']'
memory_view_index: ':' [':'] [NUMBER]
address: '&' factor
cast: '<' type ['?'] '>' factor
size_of: 'sizeof' '(' (type) ')'
new_expr: 'new' type '(' [arglist] ')'
# TODO: Restrict cdef_stmt to "top-level" statements.
cdef_stmt: ('cdef' | 'cpdef') (cvar_def | cdef_type_decl | extern_block)
cdef_type_decl: ctype_decl | fused | cclass
ctype_decl: struct | enum | cppclass
# TODO: Does the cdef/ctypedef distinction even make sense for fused?
ctypedef_stmt: 'ctypedef' (cvar_decl | struct | enum | fused)
# Note: these two are similar but can't be used in an or clause
# as it would cause ambiguity in the LL(1) parser.
# Requires a type
cvar_decl: [visibility] type cname (NEWLINE | cfunc)
# Allows an assignment
cvar_def: [visibility] maybe_typed_name (['=' test] (',' PY_NAME ['=' test])* NEWLINE | cfunc)
visibility: 'public' | 'api' | 'readonly'
# TODO: Standardize gil_spec first or last.
cfunc: [template_params] parameters [gil_spec] [exception_value] [gil_spec] (':' suite | NEWLINE)
exception_value: 'except' (['?'] expr | '*' | '+' [PY_NAME])
gil_spec: 'with' ('gil' | 'nogil') | 'nogil'
cname: NAME [STRING]
cclass: classdef
fused: 'fused' PY_NAME ':' NEWLINE INDENT ( type NEWLINE)+ DEDENT
enum: 'enum' [cname] (NEWLINE | ':' enum_suite)
enum_suite: NEWLINE INDENT (cname ['=' NUMBER] NEWLINE | pass_stmt NEWLINE)+ DEDENT
struct: ('struct' | 'union') cname (NEWLINE | (':' struct_suite))
struct_suite: NEWLINE INDENT (cvar_decl | pass_stmt NEWLINE)+ DEDENT
cppclass: 'cppclass' cname [template_params] [cppclass_bases] (NEWLINE | ':' cppclass_suite)
cppclass_bases: '(' dotted_PY_NAME (',' dotted_PY_NAME [template_params])* ')'
cppclass_suite: NEWLINE INDENT (cvar_decl | ctype_decl | pass_stmt NEWLINE)+ DEDENT
# TODO: C++ constructors, operators
extern_block: 'extern' (cvar_decl | 'from' ('*' | STRING) ['namespace' STRING] [gil_spec] ':' (pass_stmt | extern_suite))
extern_suite: NEWLINE INDENT (['cdef' | 'cpdef'] (cvar_decl | cdef_type_decl) | ctypedef_stmt)+ DEDENT
cy_type_kwd: 'struct' | 'union' | 'fused' | 'cppclass' | 'int' | 'double' | 'complex'
cy_kwd: cy_type_kwd | signedness | longness | visibility | 'gil' | 'nogil' | 'namespace' | 'const' | 'by' | 'extern'
PY_NAME: NAME | cy_kwd
# TODO: Do we really want these? Don't play well with include...
DEF_stmt: 'DEF' NAME '=' testlist
IF_stmt: 'IF' test ':' suite ('ELIF' test ':' suite)* ['ELSE' ':' suite]
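# Illustrative declarations this grammar is intended to accept (informal examples
# inferred from the rules above and the test changes in this commit):
#   cdef int[5] a                          # array dimension on the type, not the name
#   cdef unsigned long long[100][100] var
#   cdef extern from "foo.h":              # extern_block; "foo.h" is a placeholder
#       int bar(int x) nogil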
# Moves
#
# use: sed [-E | -r] -i -f move-declarators.sed [files]
# Arrays
# cdef int a[5] -> cdef int[5] a
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+) +([_0-9a-zA-Z]+)((\[[0-9]*\])+)$/\1cdef \2\4 \3/
# Pointers
# cdef int a, *b -> cdef int a \n cdef int *b
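# The substitution below is repeated five times because each pass splits off only
# one trailing pointer declarator onto its own cdef line; presumably up to five
# extra pointers per declaration were enough, avoiding an explicit sed loop.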
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
......@@ -1743,6 +1743,7 @@ def main():
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
options, cmd_args = parser.parse_args()
......@@ -1802,6 +1803,7 @@ def main():
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.shard_count > 1 and options.shard_num == -1:
import multiprocessing
......
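(With these hooks in place, the whole test suite can presumably be run under the grammar check via: python runtests.py --use_formal_grammar)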
......@@ -4,6 +4,8 @@ try:
except ImportError:
from distutils.core import setup, Extension
import os
import stat
import subprocess
import sys
import platform
......@@ -116,6 +118,29 @@ def compile_cython_modules(profile=False, compile_more=False, cython_with_refnan
"Cython.Compiler.Optimize",
])
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_inc
pgen = find_executable(
'pgen', os.pathsep.join([os.environ['PATH'], os.path.join(get_python_inc(), '..', 'Parser')]))
if not pgen:
print ("Unable to find pgen, not compiling formal grammar.")
else:
parser_dir = os.path.join(os.path.dirname(__file__), 'Cython', 'Parser')
grammar = os.path.join(parser_dir, 'Grammar')
subprocess.check_call([
pgen,
os.path.join(grammar),
os.path.join(parser_dir, 'graminit.h'),
os.path.join(parser_dir, 'graminit.c'),
])
cst_pyx = os.path.join(parser_dir, 'ConcreteSyntaxTree.pyx')
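# If the Grammar is newer than ConcreteSyntaxTree.pyx, bump the .pyx mtime to match,
# presumably so the mtime-based staleness check downstream re-cythonizes the module
# against the freshly regenerated graminit tables.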
if os.stat(grammar)[stat.ST_MTIME] > os.stat(cst_pyx)[stat.ST_MTIME]:
mtime = os.stat(grammar)[stat.ST_MTIME]
os.utime(cst_pyx, (mtime, mtime))
compiled_modules.extend([
"Cython.Parser.ConcreteSyntaxTree",
])
defines = []
if cython_with_refnanny:
defines.append(('CYTHON_REFNANNY', '1'))
......
......@@ -6,7 +6,7 @@ cdef enum E:
cdef void f():
cdef int *p
cdef void *v
cdef int a[5]
cdef int[5] a
cdef int i=0
cdef E e=z
p = a
......
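(The remaining hunks apply the same mechanical rewrite across the test suite, presumably via the move-declarators.sed script above: array dimensions move from the variable onto the type, e.g.

    cdef int a[5]    becomes    cdef int[5] a

so the declarations use the type-first form the formal grammar expects.)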
......@@ -10,7 +10,7 @@ cdef class Spam:
cdef public float f
cdef public double d
cdef public char *s
cdef readonly char a[42]
cdef readonly char[42] a
cdef public object o
cdef readonly int r
cdef readonly Spam e
......@@ -3,7 +3,7 @@
def f(obj1, obj2, obj3):
cdef int int1, int2=0, int3=0
cdef float flt1, *ptr1=NULL
cdef int array1[42]
cdef int[42] array1
array1[int2] = 0
int1 = array1[int2]
flt1 = ptr1[int2]
......
......@@ -7,7 +7,7 @@ def test():
cdef int i = 1, j = 2, k = 3
cdef float x = 1, y = 2, z = 3
cdef object a = 1, b = 2, c = 3, d = 4, e = 5
cdef int m[3]
cdef int[3] m
m[0] = 0
m[1] = 1
m[2] = 1
......
......@@ -4,7 +4,7 @@ cdef extern from "string.h":
void memcpy(void* des, void* src, int size)
cdef void f():
cdef float f1[3]
cdef float[3] f1
cdef float* f2
f2 = f1 + 1
memcpy(f1, f2, 1)
......
# mode: compile
cdef class T:
cdef int a[1]
cdef int[1] a
cdef object b
......
......@@ -50,7 +50,7 @@ baz[0] = d
cdef int *baz
print var[0][0]
cdef unsigned long long var[100][100]
cdef unsigned long long[100][100] var
# in 0.11.1 these are warnings
FUTURE_ERRORS = u"""
......
......@@ -184,7 +184,7 @@ def test_cyarray_from_carray():
0 8 21
0 8 21
"""
cdef int a[7][8]
cdef int[7][8] a
for i in range(7):
for j in range(8):
a[i][j] = i * 8 + j
......
......@@ -958,7 +958,7 @@ def test_contig_scalar_to_slice_assignment():
14 14 14 14
20 20 20 20
"""
cdef int a[5][10]
cdef int[5][10] a
cdef int[:, ::1] _m = a
m = _m
......
......@@ -1318,7 +1318,7 @@ cdef class TestIndexSlicingDirectIndirectDims(object):
cdef Py_ssize_t[3] shape, strides, suboffsets
cdef int c_array[5]
cdef int[5] c_array
cdef int *myarray[5][5]
cdef bytes format
......@@ -1643,8 +1643,8 @@ def test_memslice_struct_with_arrays():
abc
abc
"""
cdef ArrayStruct a1[10]
cdef PackedArrayStruct a2[10]
cdef ArrayStruct[10] a1
cdef PackedArrayStruct[10] a2
test_structs_with_arr(a1)
test_structs_with_arr(a2)
......@@ -1756,14 +1756,14 @@ def test_padded_structs():
"""
>>> test_padded_structs()
"""
cdef ArrayStruct a1[10]
cdef PackedArrayStruct a2[10]
cdef AlignedNested a3[10]
cdef AlignedNestedNormal a4[10]
cdef A a5[10]
cdef B a6[10]
cdef C a7[10]
cdef D a8[10]
cdef ArrayStruct[10] a1
cdef PackedArrayStruct[10] a2
cdef AlignedNested[10] a3
cdef AlignedNestedNormal[10] a4
cdef A[10] a5
cdef B[10] a6
cdef C[10] a7
cdef D[10] a8
_test_padded(a1)
_test_padded(a2)
......@@ -1790,7 +1790,7 @@ def test_object_indices():
1
2
"""
cdef int array[3]
cdef int[3] array
cdef int[:] myslice = array
cdef int j
......@@ -1831,7 +1831,7 @@ def test_slice_assignment():
"""
>>> test_slice_assignment()
"""
cdef int carray[10][100]
cdef int[10][100] carray
cdef int i, j
for i in range(10):
......@@ -1860,8 +1860,8 @@ def test_slice_assignment_broadcast_leading():
"""
>>> test_slice_assignment_broadcast_leading()
"""
cdef int array1[1][10]
cdef int array2[10]
cdef int[1][10] array1
cdef int[10] array2
cdef int i
for i in range(10):
......@@ -1892,8 +1892,8 @@ def test_slice_assignment_broadcast_strides():
"""
>>> test_slice_assignment_broadcast_strides()
"""
cdef int src_array[10]
cdef int dst_array[10][5]
cdef int[10] src_array
cdef int[10][5] dst_array
cdef int i, j
for i in range(10):
......@@ -2042,7 +2042,7 @@ def test_scalar_slice_assignment():
cdef int[10] a
cdef int[:] m = a
cdef int a2[5][10]
cdef int[5][10] a2
cdef int[:, ::1] m2 = a2
_test_scalar_slice_assignment(m, m2)
......@@ -2098,7 +2098,7 @@ def test_contig_scalar_to_slice_assignment():
14 14 14 14
20 20 20 20
"""
cdef int a[5][10]
cdef int[5][10] a
cdef int[:, ::1] m = a
m[...] = 14
......
......@@ -296,28 +296,28 @@ def test_coerce_to_numpy():
#
### First set up some C arrays that will be used to hold data
#
cdef MyStruct mystructs[20]
cdef SmallStruct smallstructs[20]
cdef NestedStruct nestedstructs[20]
cdef PackedStruct packedstructs[20]
cdef MyStruct[20] mystructs
cdef SmallStruct[20] smallstructs
cdef NestedStruct[20] nestedstructs
cdef PackedStruct[20] packedstructs
cdef signed char chars[20]
cdef short shorts[20]
cdef int ints[20]
cdef long long longlongs[20]
cdef td_h_short externs[20]
cdef signed char[20] chars
cdef short[20] shorts
cdef int[20] ints
cdef long long[20] longlongs
cdef td_h_short[20] externs
cdef float floats[20]
cdef double doubles[20]
cdef long double longdoubles[20]
cdef float[20] floats
cdef double[20] doubles
cdef long double[20] longdoubles
cdef float complex floatcomplex[20]
cdef double complex doublecomplex[20]
cdef long double complex longdoublecomplex[20]
cdef float complex[20] floatcomplex
cdef double complex[20] doublecomplex
cdef long double complex[20] longdoublecomplex
cdef td_h_short h_shorts[20]
cdef td_h_double h_doubles[20]
cdef td_h_ushort h_ushorts[20]
cdef td_h_short[20] h_shorts
cdef td_h_double[20] h_doubles
cdef td_h_ushort[20] h_ushorts
cdef Py_ssize_t idx = 17
......@@ -586,7 +586,7 @@ cdef getbuffer(Buffer self, Py_buffer *info):
info.format = self.format
cdef class Buffer(object):
cdef Py_ssize_t _shape[2]
cdef Py_ssize_t[2] _shape
cdef bytes format
cdef float[:, :] m
cdef object shape, strides
......
......@@ -153,7 +153,7 @@ def slice_charptr_for_loop_c_enumerate():
############################################################
# tests for int* slicing
cdef int cints[6]
cdef int[6] cints
for i in range(6):
cints[i] = i
......@@ -207,7 +207,7 @@ def slice_intptr_for_loop_c():
############################################################
# tests for slicing other arrays
cdef double cdoubles[6]
cdef double[6] cdoubles
for i in range(6):
cdoubles[i] = i + 0.5
......@@ -267,7 +267,7 @@ def struct_ptr_iter():
>>> struct_ptr_iter()
([0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4])
"""
cdef MyStruct my_structs[5]
cdef MyStruct[5] my_structs
for i in range(5):
my_structs[i].i = i
cdef MyStruct value
......
......@@ -3,7 +3,7 @@ def test1():
>>> test1()
2
"""
cdef int x[2][2]
cdef int[2][2] x
x[0][0] = 1
x[0][1] = 2
x[1][0] = 3
......@@ -19,7 +19,7 @@ def test2():
>>> test2()
0
"""
cdef int a1[5]
cdef int[5] a1
cdef int a2[2+3]
return sizeof(a1) - sizeof(a2)
......
......@@ -195,7 +195,7 @@ def varargs():
>>> print(varargs())
abc
"""
cdef char buffer[10]
cdef char[10] buffer
retval = snprintf(buffer, template="abc", size=10)
if retval < 0:
raise MemoryError()
......
......@@ -7,7 +7,8 @@ def test(x, int y):
"""
if True:
before = 0
cdef int a = 4, b = x, c = y, *p = &y
cdef int a = 4, b = x, c = y
cdef int *p = &y
cdef object o = int(8)
print a, b, c, p[0], before, g, o
......
......@@ -23,7 +23,7 @@ else:
"""
def test_assign():
cdef char s[17]
cdef char[17] s
s[ 0] = c'\x10'
s[ 1] = c'\x21'
......
......@@ -11,7 +11,7 @@ else:
"""
def test():
cdef char s[50]
cdef char[50] s
s[ 0] = c'\0'
s[ 1] = c'\x01'
......
......@@ -16,7 +16,7 @@ def eggs():
>>> print(str(eggs()).replace("b'", "'"))
('abcdefg', 'abcdefg')
"""
cdef char silly[42]
cdef char[42] silly
cdef Grail grail
spam(silly)
spam(grail.silly)
......
......@@ -84,7 +84,7 @@ def test_copy(char *a):
True
"""
cdef string t = string(a)
cdef char buffer[6]
cdef char[6] buffer
cdef size_t length = t.copy(buffer, 4, 1)
buffer[length] = c'\0'
return buffer
......
......@@ -15,8 +15,8 @@ def f():
cdef char *a_char_ptr, *another_char_ptr
cdef char **a_char_ptr_ptr
cdef char ***a_char_ptr_ptr_ptr
cdef char a_sized_char_array[10]
cdef char a_2d_char_array[10][20]
cdef char[10] a_sized_char_array
cdef char[10][20] a_2d_char_array
cdef char *a_2d_char_ptr_array[10][20]
cdef char **a_2d_char_ptr_ptr_array[10][20]
cdef int (*a_0arg_function)()
......
......@@ -9,7 +9,7 @@ def libc_cimports():
>>> libc_cimports()
hello
"""
cdef char buf[10]
cdef char[10] buf
sprintf(buf, "%s", b'hello')
print (<object>buf).decode('ASCII')
......
......@@ -3,6 +3,6 @@ def test():
>>> test()
1.0
"""
cdef float v[10][10]
cdef float[10][10] v
v[1][2] = 1.0
return v[1][2]
......@@ -62,7 +62,7 @@ def c_enumerate_carray_target():
3 4
"""
cdef int k
cdef int i[1]
cdef int[1] i
for i[0],k in enumerate(range(1,5)):
print i[0], k
......
......@@ -94,10 +94,10 @@ def test_fused_with_pointer():
breakfast
humptydumptyfallsplatchbreakfast
"""
cdef int int_array[5]
cdef long long_array[5]
cdef float float_array[5]
cdef string_t string_array[5]
cdef int[5] int_array
cdef long[5] long_array
cdef float[5] float_array
cdef string_t[5] string_array
cdef char *s
......
......@@ -111,7 +111,7 @@ def test_side_effects():
a[side_effect(1)] += 10
a[c_side_effect(2)] += 100
cdef int i
cdef int b[5]
cdef int[5] b
for i from 0 <= i < 5:
b[i] = i
b[side_effect(3)] += 10
......
......@@ -9,7 +9,7 @@ __doc__ = u"""
"""
cdef class A:
cdef double x[3]
cdef double[3] x
def __init__(self, *args):
cdef int i, max
......
cdef int data[10]
cdef int[10] data
cdef int[:] myslice = data
def test_memoryview_namespace():
......
......@@ -17,7 +17,7 @@ ctypedef Py_UNICODE* LPWSTR
cdef unicode uobj = u'unicode\u1234'
cdef unicode uobj1 = u'u'
cdef Py_UNICODE* c_pu_str = u"unicode\u1234"
cdef Py_UNICODE c_pu_arr[42]
cdef Py_UNICODE[42] c_pu_arr
cdef LPWSTR c_wstr = u"unicode\u1234"
cdef Py_UNICODE* c_pu_empty = u""
cdef char* c_empty = ""
......
......@@ -4,7 +4,7 @@ def primes(int kmax):
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
"""
cdef int n, k, i
cdef int p[1000]
cdef int[1000] p
result = []
if kmax > 1000:
kmax = 1000
......
......@@ -14,6 +14,6 @@ cdef extern from "string.h":
from cpython cimport PyUnicode_DecodeUTF8
def spam():
cdef char buf[12]
cdef char[12] buf
memcpy(buf, "Ftang\0Ftang!", sizeof(buf))
return PyUnicode_DecodeUTF8(buf, sizeof(buf), NULL)
......@@ -25,7 +25,7 @@ def nonzero(int x):
from libc.string cimport strcpy
cdef char error_msg[256]
cdef char[256] error_msg
cdef jmp_buf error_ctx
cdef void error(char msg[]) nogil:
strcpy(error_msg,msg)
......
......@@ -387,7 +387,7 @@ def loop_over_struct_ptr():
>>> print( loop_over_struct_ptr() )
MyStruct
"""
cdef MyStruct a_list[10]
cdef MyStruct[10] a_list
cdef MyStruct *a_ptr = a_list
for i in a_list[:10]:
pass
......