Commit fb59a89d authored by Dag Sverre Seljebotn's avatar Dag Sverre Seljebotn

merge

parents 4a7bea78 ade90e67
966abe58538dfbdaccd53bd970d4998c78ea522e 0.9.6.14
67ee5a34bfc662e4e3cf989c2c8bf78a412ae8f4 0.9.8rc1
16a746d969e2654112fc0dc081690b891c496977 Version-0.9.8
a09347d7b470290076b983aef98707921445a038 0.9.8.1
......@@ -17,10 +17,10 @@ special_chars = [(u'<', u'\xF0', u'&lt;'),
class AnnotationCCodeWriter(CCodeWriter):
def __init__(self, create_from=None, buffer=None):
CCodeWriter.__init__(self, create_from, buffer)
self.annotation_buffer = StringIO()
def __init__(self, create_from=None, buffer=None, copy_formatting=True):
CCodeWriter.__init__(self, create_from, buffer, copy_formatting=True)
if create_from is None:
self.annotation_buffer = StringIO()
self.annotations = []
self.last_pos = None
self.code = {}
......@@ -29,7 +29,8 @@ class AnnotationCCodeWriter(CCodeWriter):
self.annotation_buffer = create_from.annotation_buffer
self.annotations = create_from.annotations
self.code = create_from.code
self.last_pos = create_from.last_pos
def create_new(self, create_from, buffer, copy_formatting):
return AnnotationCCodeWriter(create_from, buffer, copy_formatting)
......
......@@ -3,7 +3,7 @@ from Cython.Compiler.ModuleNode import ModuleNode
from Cython.Compiler.Nodes import *
from Cython.Compiler.ExprNodes import *
from Cython.Compiler.TreeFragment import TreeFragment
from Cython.Utils import EncodedString
from Cython.Compiler.StringEncoding import EncodedString
from Cython.Compiler.Errors import CompileError
import Interpreter
import PyrexTypes
......@@ -115,6 +115,7 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
#
buffer_options = ("dtype", "ndim", "mode") # ordered!
buffer_defaults = {"ndim": 1, "mode": "full"}
buffer_positional_options_count = 1 # anything beyond this needs keyword argument
ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option'
ERR_BUF_TOO_MANY = 'Too many buffer options'
......@@ -140,14 +141,14 @@ def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, nee
posargs, dictargs = Interpreter.interpret_compiletime_options(posargs, dictargs, type_env=env)
if len(posargs) > len(buffer_options):
if len(posargs) > buffer_positional_options_count:
raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
options = {}
for name, (value, pos) in dictargs.iteritems():
if not name in buffer_options:
raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
options[name] = value
options[name.encode("ASCII")] = value
for name, (value, pos) in zip(buffer_options, posargs):
if not name in buffer_options:
......@@ -453,14 +454,14 @@ def get_ts_check_item(dtype, writer):
if not writer.globalstate.has_utility_code(name):
char = dtype.typestring
if char is not None:
# Can use direct comparison
# Can use direct comparison
code = dedent("""\
if (*ts == '1') ++ts;
if (*ts != '%s') {
PyErr_Format(PyExc_ValueError, "Buffer datatype mismatch (rejecting on '%%s')", ts);
PyErr_Format(PyExc_ValueError, "Buffer datatype mismatch (expected '%s', got '%%s')", ts);
return NULL;
} else return ts + 1;
""", 2) % char
""", 2) % (char, char)
else:
# Cannot trust declared size; but rely on int vs float and
# signed/unsigned to be correctly declared
......@@ -474,20 +475,27 @@ def get_ts_check_item(dtype, writer):
('b', 'char'), ('h', 'short'), ('i', 'int'),
('l', 'long'), ('q', 'long long')
]
if dtype.signed == 0:
code += "".join(["\n case '%s': ok = (sizeof(%s) == sizeof(%s) && (%s)-1 > 0); break;" %
elif dtype.is_float:
types = [('f', 'float'), ('d', 'double'), ('g', 'long double')]
else:
assert dtype.is_error
return name
if dtype.signed == 0:
code += "".join(["\n case '%s': ok = (sizeof(%s) == sizeof(%s) && (%s)-1 > 0); break;" %
(char.upper(), ctype, against, ctype) for char, against in types])
else:
code += "".join(["\n case '%s': ok = (sizeof(%s) == sizeof(%s) && (%s)-1 < 0); break;" %
else:
code += "".join(["\n case '%s': ok = (sizeof(%s) == sizeof(%s) && (%s)-1 < 0); break;" %
(char, ctype, against, ctype) for char, against in types])
code += dedent("""\
default: ok = 0;
}
if (!ok) {
PyErr_Format(PyExc_ValueError, "Buffer datatype mismatch (rejecting on '%s')", ts);
return NULL;
} else return ts + 1;
code += dedent("""\
default: ok = 0;
}
if (!ok) {
PyErr_Format(PyExc_ValueError, "Buffer datatype mismatch (rejecting on '%s')", ts);
return NULL;
} else return ts + 1;
""", 2)
writer.globalstate.use_utility_code([dedent("""\
static const char* %s(const char* ts); /*proto*/
""") % name, dedent("""
......@@ -537,7 +545,7 @@ def get_getbuffer_code(dtype, code):
ts = __Pyx_ConsumeWhitespace(ts);
if (*ts != 0) {
PyErr_Format(PyExc_ValueError,
"Expected non-struct buffer data type (rejecting on '%%s')", ts);
"Expected non-struct buffer data type (expected end, got '%%s')", ts);
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
......
......@@ -38,7 +38,7 @@ Options:
-a, --annotate Produce a colorized HTML version of the source.
--convert-range Convert for loops using range() function to for...from loops.
--cplus Output a c++ rather than c file.
-O, --option <name>=<value>[,<name=value,...] Overrides an optimization/code generation option
-X, --directive <name>=<value>[,<name=value,...] Overrides a compiler directive
"""
#The following experimental options are supported only on MacOSX:
# -C, --compile Compile generated .c file to .o file
......@@ -114,11 +114,11 @@ def parse_command_line(args):
Options.annotate = True
elif option == "--convert-range":
Options.convert_range = True
elif option in ("-O", "--option"):
elif option in ("-X", "--directive"):
try:
options.pragma_overrides = Options.parse_option_list(pop_arg())
except ValueError, e:
sys.stderr.write("Error in option string: %s\n" % e.message)
sys.stderr.write("Error in compiler directive: %s\n" % e.message)
sys.exit(1)
else:
bad_usage()
......
......@@ -13,7 +13,7 @@ class CythonScope(ModuleScope):
self.shape_entry = self.declare_cfunction('shape',
shape_func_type,
pos=None,
visibility='public',
defining = 1,
cname='<error>')
def create_cython_scope(context):
......
......@@ -6,6 +6,7 @@ import operator
from string import join
from Errors import error, warning, InternalError
import StringEncoding
import Naming
from Nodes import Node
import PyrexTypes
......@@ -14,7 +15,6 @@ from Builtin import list_type, tuple_type, dict_type, unicode_type
import Symtab
import Options
from Annotate import AnnotationItem
from Cython import Utils
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
......@@ -640,10 +640,10 @@ class CharNode(ConstNode):
type = PyrexTypes.c_char_type
def compile_time_value(self, denv):
return ord(self.value.byteencode())
return ord(self.value)
def calculate_result_code(self):
return "'%s'" % Utils.escape_character(self.value.byteencode())
return "'%s'" % StringEncoding.escape_character(self.value)
class IntNode(ConstNode):
......@@ -1369,6 +1369,10 @@ class IndexNode(ExprNode):
# Note: This might be cleaned up by having IndexNode
# parsed in a saner way and only construct the tuple if
# needed.
# Note that this function must leave IndexNode in a cloneable state.
# For buffers, self.index is packed out on the initial analysis, and
# when cloning self.indices is copied.
self.is_buffer_access = False
self.base.analyse_types(env)
......@@ -1379,11 +1383,16 @@ class IndexNode(ExprNode):
skip_child_analysis = False
buffer_access = False
if self.base.type.is_buffer:
assert isinstance(self.base, NameNode)
if isinstance(self.index, TupleNode):
indices = self.index.args
assert hasattr(self.base, "entry") # Must be a NameNode-like node
if self.indices:
indices = self.indices
else:
indices = [self.index]
# On cloning, indices is cloned. Otherwise, unpack index into indices
assert not isinstance(self.index, CloneNode)
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
if len(indices) == self.base.type.ndim:
buffer_access = True
skip_child_analysis = True
......@@ -1469,7 +1478,7 @@ class IndexNode(ExprNode):
def generate_subexpr_evaluation_code(self, code):
self.base.generate_evaluation_code(code)
if self.index is not None:
if not self.indices:
self.index.generate_evaluation_code(code)
else:
for i in self.indices:
......@@ -1477,7 +1486,7 @@ class IndexNode(ExprNode):
def generate_subexpr_disposal_code(self, code):
self.base.generate_disposal_code(code)
if self.index is not None:
if not self.indices:
self.index.generate_disposal_code(code)
else:
for i in self.indices:
......@@ -1525,30 +1534,34 @@ class IndexNode(ExprNode):
value_code,
self.index_unsigned_parameter(),
code.error_goto(self.pos)))
def generate_buffer_setitem_code(self, rhs, code, op=""):
# Used from generate_assignment_code and InPlaceAssignmentNode
ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(self.buffer_type.buffer_ptr_type)
if rhs.is_temp:
rhs_code = code.funcstate.allocate_temp(rhs.type)
else:
rhs_code = rhs.result_code
code.putln("%s = %s;" % (ptr, ptrexpr))
code.putln("Py_DECREF(*%s); Py_INCREF(%s);" % (
ptr, rhs_code
))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
if rhs.is_temp:
code.funcstate.release_temp(rhs_code)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result_code))
def generate_assignment_code(self, rhs, code):
self.generate_subexpr_evaluation_code(code)
if self.is_buffer_access:
ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(self.buffer_type.buffer_ptr_type)
if rhs.is_temp:
rhs_code = code.funcstate.allocate_temp(rhs.type)
else:
rhs_code = rhs.result_code
code.putln("%s = %s;" % (ptr, ptrexpr))
code.putln("Py_DECREF(*%s); Py_INCREF(%s);" % (
ptr, rhs_code
))
code.putln("*%s = %s;" % (ptr, rhs_code))
if rhs.is_temp:
code.funcstate.release_temp(rhs_code)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s = %s;" % (ptrexpr, rhs.result_code))
self.generate_buffer_setitem_code(rhs, code)
elif self.type.is_pyobject:
self.generate_setitem_code(rhs.py_result(), code)
else:
......@@ -1582,6 +1595,9 @@ class IndexNode(ExprNode):
code.putln("%s = %s;" % (temp, index.result_code))
# Generate buffer access code using these temps
import Buffer
assert self.options is not None
# The above could happen because child_attrs is wrong somewhere so that
# options are not propagated.
return Buffer.put_buffer_lookup_code(entry=self.base.entry,
index_signeds=[i.type.signed for i in self.indices],
index_cnames=index_temps,
......@@ -2564,6 +2580,8 @@ class ListComprehensionNode(SequenceNode):
subexprs = []
is_sequence_constructor = 0 # not unpackable
child_attrs = ["loop", "append"]
def analyse_types(self, env):
self.type = list_type
self.is_temp = 1
......@@ -2589,6 +2607,8 @@ class ListComprehensionNode(SequenceNode):
class ListComprehensionAppendNode(ExprNode):
# Need to be careful to avoid infinite recursion:
# target must not be in child_attrs/subexprs
subexprs = ['expr']
def analyse_types(self, env):
......@@ -3066,6 +3086,20 @@ class SizeofTypeNode(SizeofNode):
subexprs = []
def analyse_types(self, env):
# we may have incorrectly interpreted a dotted name as a type rather than an attribute
# this could be better handled by more uniformly treating types as runtime-available objects
if self.base_type.module_path:
path = self.base_type.module_path
obj = env.lookup(path[0])
if obj.as_module is None:
operand = NameNode(pos=self.pos, name=path[0])
for attr in path[1:]:
operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
self.operand = operand
self.__class__ = SizeofVarNode
self.analyse_types(env)
return
base_type = self.base_type.analyse(env)
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
......@@ -3937,6 +3971,7 @@ class CoercionNode(ExprNode):
def __init__(self, arg):
self.pos = arg.pos
self.arg = arg
self.options = arg.options
if debug_coercion:
print("%s Coercing %s" % (self, self.arg))
......
......@@ -79,9 +79,7 @@ def make_lexicon():
escaped_newline = Str("\\\n")
lineterm = Eol + Opt(Str("\n"))
comment_start = Str("#")
comment = comment_start + Rep(AnyBut("\n"))
option_comment = comment_start + Str("cython:") + Rep(AnyBut("\n"))
comment = Str("#") + Rep(AnyBut("\n"))
return Lexicon([
(name, 'IDENT'),
......@@ -98,13 +96,12 @@ def make_lexicon():
#(stringlit, 'STRING'),
(beginstring, Method('begin_string_action')),
(option_comment, Method('option_comment')),
(comment, IGNORE),
(spaces, IGNORE),
(escaped_newline, IGNORE),
State('INDENT', [
(option_comment + lineterm, Method('option_comment')),
(comment + lineterm, Method('commentline')),
(Opt(spaces) + Opt(comment) + lineterm, IGNORE),
(indentation, Method('indentation_action')),
(Eof, Method('eof_action'))
......
......@@ -19,7 +19,7 @@ import Errors
import Parsing
import Version
from Scanning import PyrexScanner, FileSourceDescriptor
from Errors import PyrexError, CompileError, error
from Errors import PyrexError, CompileError, InternalError, error
from Symtab import BuiltinScope, ModuleScope
from Cython import Utils
from Cython.Utils import open_new_file, replace_suffix
......@@ -170,6 +170,10 @@ class Context:
except CompileError, err:
# err is set
Errors.report_error(err)
except InternalError, err:
# Only raise if there was not an earlier error
if Errors.num_errors == 0:
raise
return (err, data)
def find_module(self, module_name,
......@@ -397,6 +401,8 @@ class Context:
finally:
f.close()
except UnicodeDecodeError, msg:
#import traceback
#traceback.print_exc()
error((source_desc, 0, 0), "Decoding error, missing or incorrect coding=<encoding-name> at top of source (%s)" % msg)
if Errors.num_errors > 0:
raise CompileError
......
......@@ -23,7 +23,8 @@ import Version
from Errors import error, warning
from PyrexTypes import py_object_type
from Cython.Utils import open_new_file, replace_suffix, escape_byte_string, EncodedString
from Cython.Utils import open_new_file, replace_suffix
from StringEncoding import escape_byte_string, EncodedString
def check_c_classes(module_node):
......@@ -421,7 +422,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("#if PY_VERSION_HEX < 0x02060000")
code.putln(" #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)")
code.putln(" #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)")
code.putln(" #define Py_SIZE(ob) ((PyVarObject*)(ob))->ob_size)")
code.putln(" #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)")
code.putln(" #define PyVarObject_HEAD_INIT(type, size) \\")
code.putln(" PyObject_HEAD_INIT(type) size,")
code.putln(" #define PyType_Modified(t)")
......@@ -488,12 +489,17 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(" #define PyMethod_New(func, self, klass) PyInstanceMethod_New(func)")
code.putln("#endif")
code.putln("#ifndef __stdcall")
code.putln(" #define __stdcall")
code.putln("#endif")
code.putln("#ifndef __cdecl")
code.putln(" #define __cdecl")
code.putln("#if !defined(WIN32) && !defined(MS_WINDOWS)")
code.putln(" #ifndef __stdcall")
code.putln(" #define __stdcall")
code.putln(" #endif")
code.putln(" #ifndef __cdecl")
code.putln(" #define __cdecl")
code.putln(" #endif")
code.putln("#else")
code.putln(" #define _USE_MATH_DEFINES")
code.putln("#endif")
self.generate_extern_c_macro_definition(code)
code.putln("#include <math.h>")
code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
......@@ -514,9 +520,12 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln('static const char *%s;' % Naming.filename_cname)
code.putln('static const char **%s;' % Naming.filetable_cname)
if env.doc:
docstr = env.doc
if not isinstance(docstr, str):
docstr = docstr.utf8encode()
code.putln('')
code.putln('static char %s[] = "%s";' % (
env.doc_cname, escape_byte_string(env.doc.utf8encode())))
env.doc_cname, escape_byte_string(docstr)))
def generate_extern_c_macro_definition(self, code):
name = Naming.extern_c_macro
......
......@@ -13,7 +13,7 @@ from PyrexTypes import py_object_type, error_type, CTypedefType, CFuncType
from Symtab import ModuleScope, LocalScope, GeneratorLocalScope, \
StructOrUnionScope, PyClassScope, CClassScope
from Cython.Utils import open_new_file, replace_suffix
from Cython.Utils import EncodedString, escape_byte_string
from StringEncoding import EncodedString, escape_byte_string, split_docstring
import Options
import ControlFlow
......@@ -71,10 +71,12 @@ class Node(object):
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
# options dict Compiler directives in effect for this node
is_name = 0
is_literal = 0
temps = None
options = None
# All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
......@@ -174,10 +176,18 @@ class Node(object):
self._end_pos = pos
return pos
def dump(self, level=0, filter_out=("pos",)):
def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
if cutoff == 0:
return "<...nesting level cutoff...>"
if encountered is None:
encountered = set()
if id(self) in encountered:
return "<%s (%d) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level)
return x.dump(level, filter_out, cutoff-1, encountered)
elif isinstance(x, list):
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
......@@ -591,6 +601,7 @@ class CBufferAccessTypeNode(CBaseTypeNode):
def analyse(self, env):
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
import Buffer
options = Buffer.analyse_buffer_options(
......@@ -1516,10 +1527,13 @@ class DefNode(FuncDefNode):
if proto_only:
return
if self.entry.doc and Options.docstrings:
docstr = self.entry.doc
if not isinstance(docstr, str):
docstr = docstr.utf8encode()
code.putln(
'static char %s[] = "%s";' % (
self.entry.doc_cname,
escape_byte_string(self.entry.doc.utf8encode())))
split_docstring(escape_byte_string(docstr))))
if with_pymethdef:
code.put(
"static PyMethodDef %s = " %
......@@ -2137,15 +2151,6 @@ class CClassDefNode(ClassDefNode):
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
if has_body and not self.in_pxd:
# transforms not yet run on pxd files
from ParseTreeTransforms import AnalyseDeclarationsTransform
transform = AnalyseDeclarationsTransform(None)
for entry in scope.var_entries:
if hasattr(entry, 'needs_property'):
property = transform.create_Property(entry)
self.body.stats.append(property)
if has_body:
self.body.analyse_declarations(scope)
if self.in_pxd:
......@@ -2514,12 +2519,16 @@ class InPlaceAssignmentNode(AssignmentNode):
def generate_execution_code(self, code):
self.rhs.generate_evaluation_code(code)
self.dup.generate_subexpr_evaluation_code(code)
self.dup.generate_result_code(code)
# self.dup.generate_result_code is run only if it is not buffer access
if self.operator == "**":
extra = ", Py_None"
else:
extra = ""
import ExprNodes
if self.lhs.type.is_pyobject:
if isinstance(self.lhs, ExprNodes.IndexNode) and self.lhs.is_buffer_access:
error(self.pos, "In-place operators not allowed on object buffers in this release.")
self.dup.generate_result_code(code)
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result.result_code,
......@@ -2542,7 +2551,11 @@ class InPlaceAssignmentNode(AssignmentNode):
else:
error(self.pos, "No C inplace power operator")
# have to do assignment directly to avoid side-effects
code.putln("%s %s= %s;" % (self.lhs.result_code, c_op, self.rhs.result_code) )
if isinstance(self.lhs, ExprNodes.IndexNode) and self.lhs.is_buffer_access:
self.lhs.generate_buffer_setitem_code(self.rhs, code, c_op)
else:
self.dup.generate_result_code(code)
code.putln("%s %s= %s;" % (self.lhs.result_code, c_op, self.rhs.result_code) )
self.rhs.generate_disposal_code(code)
if self.dup.is_temp:
self.dup.generate_subexpr_disposal_code(code)
......@@ -2552,11 +2565,32 @@ class InPlaceAssignmentNode(AssignmentNode):
self.dup = self.lhs
self.dup.analyse_types(env)
if isinstance(self.lhs, ExprNodes.NameNode):
target_lhs = ExprNodes.NameNode(self.dup.pos, name = self.dup.name, is_temp = self.dup.is_temp, entry = self.dup.entry)
target_lhs = ExprNodes.NameNode(self.dup.pos,
name = self.dup.name,
is_temp = self.dup.is_temp,
entry = self.dup.entry,
options = self.dup.options)
elif isinstance(self.lhs, ExprNodes.AttributeNode):
target_lhs = ExprNodes.AttributeNode(self.dup.pos, obj = ExprNodes.CloneNode(self.lhs.obj), attribute = self.dup.attribute, is_temp = self.dup.is_temp)
target_lhs = ExprNodes.AttributeNode(self.dup.pos,
obj = ExprNodes.CloneNode(self.lhs.obj),
attribute = self.dup.attribute,
is_temp = self.dup.is_temp,
options = self.dup.options)
elif isinstance(self.lhs, ExprNodes.IndexNode):
target_lhs = ExprNodes.IndexNode(self.dup.pos, base = ExprNodes.CloneNode(self.dup.base), index = ExprNodes.CloneNode(self.lhs.index), is_temp = self.dup.is_temp)
if self.lhs.index:
index = ExprNodes.CloneNode(self.lhs.index)
else:
index = None
if self.lhs.indices:
indices = [ExprNodes.CloneNode(x) for x in self.lhs.indices]
else:
indices = []
target_lhs = ExprNodes.IndexNode(self.dup.pos,
base = ExprNodes.CloneNode(self.dup.base),
index = index,
indices = indices,
is_temp = self.dup.is_temp,
options = self.dup.options)
self.lhs = target_lhs
return self.dup
......@@ -3007,7 +3041,7 @@ class SwitchCaseNode(StatNode):
def annotate(self, code):
for cond in self.conditions:
cond.annotate(code)
body.annotate(code)
self.body.annotate(code)
class SwitchStatNode(StatNode):
# Generated in the optimization of an if-elif-else node
......@@ -3031,7 +3065,8 @@ class SwitchStatNode(StatNode):
self.test.annotate(code)
for case in self.cases:
case.annotate(code)
self.else_clause.annotate(code)
if self.else_clause is not None:
self.else_clause.annotate(code)
class LoopNode:
......
......@@ -63,6 +63,30 @@ option_defaults = {
'boundscheck' : True
}
def parse_option_value(name, value):
    """
    Parses value as an option value for the given name and returns
    the interpreted value. None is returned if the option does not exist.

    >>> print parse_option_value('nonexisting', 'asdf asdfd')
    None
    >>> parse_option_value('boundscheck', 'True')
    True
    >>> parse_option_value('boundscheck', 'true')
    Traceback (most recent call last):
       ...
    ValueError: boundscheck directive must be set to True or False
    """
    # Avoid shadowing the builtin 'type', and test for absence explicitly
    # rather than by truthiness.
    option_type = option_types.get(name)
    if option_type is None:
        return None
    if option_type is bool:
        if value == "True":
            return True
        elif value == "False":
            return False
        else:
            raise ValueError("%s directive must be set to True or False" % name)
    else:
        # Only bool-typed options exist so far; extend here when new
        # option types are added to option_types.
        assert False
def parse_option_list(s):
"""
Parses a comma-separated list of pragma options. Whitespace
......
......@@ -3,7 +3,7 @@ from Cython.Compiler.ModuleNode import ModuleNode
from Cython.Compiler.Nodes import *
from Cython.Compiler.ExprNodes import *
from Cython.Compiler.TreeFragment import TreeFragment
from Cython.Utils import EncodedString
from Cython.Compiler.StringEncoding import EncodedString
from Cython.Compiler.Errors import CompileError
try:
set
......
This diff is collapsed.
......@@ -2,7 +2,7 @@
# Pyrex - Types
#
from Cython import Utils
import StringEncoding
import Naming
import copy
......@@ -1000,7 +1000,7 @@ class CStringType:
def literal_code(self, value):
assert isinstance(value, str)
return '"%s"' % Utils.escape_byte_string(value)
return '"%s"' % StringEncoding.escape_byte_string(value)
class CUTF8CharArrayType(CStringType, CArrayType):
......
......@@ -17,7 +17,7 @@ from Cython.Plex.Errors import UnrecognizedInput
from Errors import CompileError, error
from Lexicon import string_prefixes, raw_prefixes, make_lexicon
from Cython import Utils
from StringEncoding import EncodedString
plex_version = getattr(Plex, '_version', None)
#print "Plex version:", plex_version ###
......@@ -290,7 +290,7 @@ class PyrexScanner(Scanner):
resword_dict = build_resword_dict()
def __init__(self, file, filename, parent_scanner = None,
scope = None, context = None, source_encoding=None):
scope = None, context = None, source_encoding=None, parse_comments=True):
Scanner.__init__(self, get_lexicon(), file, filename)
if parent_scanner:
self.context = parent_scanner.context
......@@ -306,7 +306,7 @@ class PyrexScanner(Scanner):
self.compile_time_env = initial_compile_time_env()
self.compile_time_eval = 1
self.compile_time_expr = 0
self.parse_option_comments = True
self.parse_comments = parse_comments
self.source_encoding = source_encoding
self.trace = trace_scanner
self.indentation_stack = [0]
......@@ -316,12 +316,9 @@ class PyrexScanner(Scanner):
self.sy = ''
self.next()
def option_comment(self, text):
# #cython:-comments should be treated as literals until
# parse_option_comments is set to False, at which point
# they should be ignored.
if self.parse_option_comments:
self.produce('option_comment', text)
def commentline(self, text):
if self.parse_comments:
self.produce('commentline', text)
def current_level(self):
return self.indentation_stack[-1]
......@@ -413,7 +410,7 @@ class PyrexScanner(Scanner):
if systring in self.resword_dict:
sy = systring
else:
systring = Utils.EncodedString(systring)
systring = EncodedString(systring)
systring.encoding = self.source_encoding
self.sy = sy
self.systring = systring
......
#
# Cython -- encoding related tools
#
import re
class UnicodeLiteralBuilder(object):
    """Assemble a unicode string from parsed literal pieces."""

    def __init__(self):
        # Collected unicode fragments, joined lazily in getstring().
        self.chars = []

    def append(self, characters):
        if isinstance(characters, str):
            # Py2 byte string coming from a literal in the parser code;
            # promote it to unicode before storing.
            characters = characters.decode("ASCII")
        assert isinstance(characters, unicode), str(type(characters))
        self.chars.append(characters)

    def append_charval(self, char_number):
        # Append one character given by its ordinal value.
        self.chars.append(unichr(char_number))

    def getstring(self):
        # Join all fragments into an EncodedString (encoding stays None,
        # i.e. a true unicode string).
        return EncodedString(u''.join(self.chars))
class BytesLiteralBuilder(object):
    """Assemble a byte string or char value in a target encoding."""

    def __init__(self, target_encoding):
        # Collected byte-string fragments plus the encoding used to
        # encode any unicode input.
        self.chars = []
        self.target_encoding = target_encoding

    def append(self, characters):
        if isinstance(characters, unicode):
            # Encode unicode input down to bytes before storing.
            characters = characters.encode(self.target_encoding)
        assert isinstance(characters, str), str(type(characters))
        self.chars.append(characters)

    def append_charval(self, char_number):
        # Append one byte given by its ordinal value.
        self.chars.append(chr(char_number))

    def getstring(self):
        # this *must* return a byte string! => fix it in Py3k!!
        result = BytesLiteral(''.join(self.chars))
        result.encoding = self.target_encoding
        return result

    def getchar(self):
        # this *must* return a byte string! => fix it in Py3k!!
        return self.getstring()
class EncodedString(unicode):
    # unicode string subclass that keeps track of the original encoding:
    # 'encoding' is None for true unicode strings, and names the source
    # encoding otherwise.
    encoding = None

    def byteencode(self):
        # Only valid for strings that came from an encoded source.
        assert self.encoding is not None
        return self.encode(self.encoding)

    def utf8encode(self):
        # Only valid for true unicode strings.
        assert self.encoding is None
        return self.encode("UTF-8")

    @property
    def is_unicode(self):
        return self.encoding is None
class BytesLiteral(str):
    # str (byte string) subclass exposing the same interface as
    # EncodedString for literals that are already byte strings.
    encoding = None
    is_unicode = False

    def byteencode(self):
        # Already bytes; just strip the subclass wrapper.
        return str(self)

    def utf8encode(self):
        assert False, "this is not a unicode string: %r" % self
# Lookup from a backslash escape sequence (e.g. r'\n') to the character
# it denotes; being a dict .get, it returns None for sequences not in
# the table.
char_from_escape_sequence = {
    r'\a' : u'\a',
    r'\b' : u'\b',
    r'\f' : u'\f',
    r'\n' : u'\n',
    r'\r' : u'\r',
    r'\t' : u'\t',
    r'\v' : u'\v',
    }.get
def _to_escape_sequence(s):
if s in '\n\r\t':
return repr(s)[1:-1]
elif s == '"':
return r'\"'
else:
# within a character sequence, oct passes much better than hex
return ''.join(['\\%03o' % ord(c) for c in s])
# Characters/sequences that must be escaped in a C string literal:
# NUL, newline, CR, tab, '??' (trigraph introducer) and the double quote.
_c_special = ('\0', '\n', '\r', '\t', '??', '"')

# Materialize as a list: zip()/map() return one-shot iterators on Py3,
# which would leave this empty after the first escape_byte_string call.
# (On Py2 this is byte-identical to the bare zip().)
_c_special_replacements = list(zip(_c_special, map(_to_escape_sequence, _c_special)))

def _build_specials_test():
    """Compile a regex .search matching any special character sequence."""
    subexps = []
    for special in _c_special:
        # Wrap each character in a class so regex metacharacters
        # ('?', '"', NUL) are matched literally.
        regexp = ''.join(['[%s]' % c for c in special])
        subexps.append(regexp)
    return re.compile('|'.join(subexps)).search

_has_specials = _build_specials_test()
def escape_character(c):
    """Escape a single character for use in a C char literal ('x')."""
    if c in '\n\r\t\\':
        return repr(c)[1:-1]
    if c == "'":
        return "\\'"
    code = ord(c)
    if 32 <= code <= 127:
        # Printable ASCII passes through untouched.
        return c
    # hex works well for characters
    return "\\x%02X" % code
def escape_byte_string(s):
    """Escape a (Py2) byte string for embedding in a C string literal.

    Backslashes are doubled first so later replacements are not
    re-escaped; then the special sequences in _c_special are replaced by
    their C escapes; finally any remaining non-ASCII bytes are rendered
    as octal escapes.  Returns a plain byte string.
    """
    s = s.replace('\\', '\\\\')
    if _has_specials(s):
        for special, replacement in _c_special_replacements:
            s = s.replace(special, replacement)
    try:
        # If everything left is ASCII, the string can be emitted as-is.
        s.decode("ASCII")
        return s
    except UnicodeDecodeError:
        pass
    # Non-ASCII bytes remain: escape them octally (values >= 128 are
    # always three octal digits, so no width padding is needed).
    l = []
    append = l.append
    for c in s:
        o = ord(c)
        if o >= 128:
            append('\\%3o' % o)
        else:
            append(c)
    return ''.join(l)
def split_docstring(s, limit=2047):
    """Split a long escaped docstring into adjacent C string literals.

    C compilers limit the length of a single string literal; the default
    of 2047 presumably targets MSVC's ~2048-character cap -- TODO confirm.
    Strings shorter than `limit` are returned unchanged.  Longer ones are
    split at each escaped newline (the two-character sequence ``\\n``) by
    closing the literal there and reopening a new one; the adjacent
    literals are concatenated again by the C compiler.
    """
    if len(s) < limit:
        return s
    return '\\n\"\"'.join(s.split(r'\n'))
......@@ -5,6 +5,7 @@
import re
from Cython import Utils
from Errors import warning, error, InternalError
from StringEncoding import EncodedString
import Options
import Naming
import PyrexTypes
......@@ -439,7 +440,7 @@ class Scope:
if api:
entry.api = 1
if not defining and not in_pxd and visibility != 'extern':
error(pos, "Non-extern C function declared but not defined")
error(pos, "Non-extern C function '%s' declared but not defined" % name)
return entry
def add_cfunction(self, name, type, pos, cname, visibility):
......@@ -684,14 +685,14 @@ class BuiltinScope(Scope):
utility_code = None):
# If python_equiv == "*", the Python equivalent has the same name
# as the entry, otherwise it has the name specified by python_equiv.
name = Utils.EncodedString(name)
entry = self.declare_cfunction(name, type, None, cname)
name = EncodedString(name)
entry = self.declare_cfunction(name, type, None, cname, visibility='extern')
entry.utility_code = utility_code
if python_equiv:
if python_equiv == "*":
python_equiv = name
else:
python_equiv = Utils.EncodedString(python_equiv)
python_equiv = EncodedString(python_equiv)
var_entry = Entry(python_equiv, python_equiv, py_object_type)
var_entry.is_variable = 1
var_entry.is_builtin = 1
......@@ -699,7 +700,7 @@ class BuiltinScope(Scope):
return entry
def declare_builtin_type(self, name, cname):
name = Utils.EncodedString(name)
name = EncodedString(name)
type = PyrexTypes.BuiltinObjectType(name, cname)
type.set_scope(CClassScope(name, outer_scope=None, visibility='extern'))
self.type_names[name] = 1
......@@ -1370,7 +1371,7 @@ class CClassScope(ClassScope):
if name == "__new__":
warning(pos, "__new__ method of extension type will change semantics "
"in a future version of Pyrex and Cython. Use __cinit__ instead.")
name = Utils.EncodedString("__cinit__")
name = EncodedString("__cinit__")
entry = self.declare_var(name, py_object_type, pos, visibility='extern')
special_sig = get_special_method_signature(name)
if special_sig:
......@@ -1387,7 +1388,7 @@ class CClassScope(ClassScope):
def lookup_here(self, name):
if name == "__new__":
name = Utils.EncodedString("__cinit__")
name = EncodedString("__cinit__")
return ClassScope.lookup_here(self, name)
def declare_cfunction(self, name, type, pos,
......
......@@ -3,9 +3,9 @@
# and associated know-how.
#
from Cython import Utils
import Naming
import PyrexTypes
import StringEncoding
import sys
class Signature:
......@@ -311,7 +311,7 @@ class DocStringSlot(SlotDescriptor):
doc = scope.doc.utf8encode()
else:
doc = scope.doc.byteencode()
return '"%s"' % Utils.escape_byte_string(doc)
return '"%s"' % StringEncoding.escape_byte_string(doc)
else:
return "0"
......
version = '0.9.8'
version = '0.9.8.1.1'
......@@ -5,7 +5,7 @@ import inspect
import Nodes
import ExprNodes
import Naming
from Cython.Utils import EncodedString
from StringEncoding import EncodedString
class BasicVisitor(object):
"""A generic visitor base class which can be used for visiting any kind of object."""
......
......@@ -3,79 +3,107 @@ cdef extern from "Python.h":
cdef extern from "numpy/arrayobject.h":
ctypedef Py_intptr_t npy_intp
ctypedef struct PyArray_Descr:
int elsize
cdef enum:
NPY_BOOL,
NPY_BYTE, NPY_UBYTE,
NPY_SHORT, NPY_USHORT,
NPY_INT, NPY_UINT,
NPY_LONG, NPY_ULONG,
NPY_LONGLONG, NPY_ULONGLONG,
NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
NPY_OBJECT,
NPY_STRING, NPY_UNICODE,
NPY_VOID,
NPY_NTYPES,
NPY_NOTYPE,
NPY_CHAR,
NPY_USERDEF
ctypedef class numpy.ndarray [object PyArrayObject]:
cdef __cythonbufferdefaults__ = {"mode": "strided"}
cdef:
char *data
int nd
npy_intp *dimensions
int ndim "nd"
npy_intp *shape "dimensions"
npy_intp *strides
object base
# descr not implemented yet here...
int flags
int itemsize
object weakreflist
PyArray_Descr* descr
# Note: This syntax (function definition in pxd files) is an
# experimental exception made for __getbuffer__ and __releasebuffer__
# -- the details of this may change.
def __getbuffer__(ndarray self, Py_buffer* info, int flags):
# This implementation of getbuffer is geared towards Cython
# requirements, and does not yet fullfill the PEP (specifically,
# Cython always requests and we always provide strided access,
# so the flags are not even checked).
if sizeof(npy_intp) != sizeof(Py_ssize_t):
raise RuntimeError("Py_intptr_t and Py_ssize_t differs in size, numpy.pxd does not support this")
cdef int typenum = PyArray_TYPE(self)
info.buf = <void*>self.data
info.ndim = 2
info.strides = <Py_ssize_t*>self.strides
info.shape = <Py_ssize_t*>self.dimensions
info.buf = PyArray_DATA(self)
info.ndim = PyArray_NDIM(self)
info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
info.shape = <Py_ssize_t*>PyArray_DIMS(self)
info.suboffsets = NULL
info.format = "i"
info.itemsize = self.descr.elsize
info.itemsize = PyArray_ITEMSIZE(self)
info.readonly = not PyArray_ISWRITEABLE(self)
# PS TODO TODO!: Py_ssize_t vs Py_intptr_t
# Formats that are not tested and working in Cython are not
# made available from this pxd file yet.
cdef int t = PyArray_TYPE(self)
cdef char* f = NULL
if t == NPY_BYTE: f = "b"
elif t == NPY_UBYTE: f = "B"
elif t == NPY_SHORT: f = "h"
elif t == NPY_USHORT: f = "H"
elif t == NPY_INT: f = "i"
elif t == NPY_UINT: f = "I"
elif t == NPY_LONG: f = "l"
elif t == NPY_ULONG: f = "L"
elif t == NPY_LONGLONG: f = "q"
elif t == NPY_ULONGLONG: f = "Q"
elif t == NPY_FLOAT: f = "f"
elif t == NPY_DOUBLE: f = "d"
elif t == NPY_LONGDOUBLE: f = "g"
elif t == NPY_OBJECT: f = "O"
if f == NULL:
raise ValueError("only objects, int and float dtypes supported for ndarray buffer access so far (dtype is %d)" % t)
info.format = f
## PyArrayObject *arr = (PyArrayObject*)obj;
## PyArray_Descr *type = (PyArray_Descr*)arr->descr;
## int typenum = PyArray_TYPE(obj);
## if (!PyTypeNum_ISNUMBER(typenum)) {
## PyErr_Format(PyExc_TypeError, "Only numeric NumPy types currently supported.");
## return -1;
## }
## /*
## NumPy format codes doesn't completely match buffer codes;
## seems safest to retranslate.
## 01234567890123456789012345*/
## const char* base_codes = "?bBhHiIlLqQfdgfdgO";
## char* format = (char*)malloc(4);
## char* fp = format;
## *fp++ = type->byteorder;
## if (PyTypeNum_ISCOMPLEX(typenum)) *fp++ = 'Z';
## *fp++ = base_codes[typenum];
## *fp = 0;
## view->buf = arr->data;
## view->readonly = !PyArray_ISWRITEABLE(obj);
## view->ndim = PyArray_NDIM(arr);
## view->strides = PyArray_STRIDES(arr);
## view->shape = PyArray_DIMS(arr);
## view->suboffsets = NULL;
## view->format = format;
## view->itemsize = type->elsize;
## view->internal = 0;
## return 0;
## print "hello" + str(43) + "asdf" + "three"
## pass
cdef void* PyArray_DATA(ndarray arr)
cdef int PyArray_TYPE(ndarray arr)
cdef int PyArray_NDIM(ndarray arr)
cdef int PyArray_ISWRITEABLE(ndarray arr)
cdef npy_intp PyArray_STRIDES(ndarray arr)
cdef npy_intp PyArray_DIMS(ndarray arr)
cdef Py_ssize_t PyArray_ITEMSIZE(ndarray arr)
ctypedef signed int npy_byte
ctypedef signed int npy_short
ctypedef signed int npy_int
ctypedef signed int npy_long
ctypedef signed int npy_longlong
ctypedef unsigned int npy_ubyte
ctypedef unsigned int npy_ushort
ctypedef unsigned int npy_uint
ctypedef unsigned int npy_ulong
ctypedef unsigned int npy_ulonglong
ctypedef float npy_float
ctypedef float npy_double
ctypedef float npy_longdouble
ctypedef signed int npy_int8
ctypedef signed int npy_int16
ctypedef signed int npy_int32
ctypedef signed int npy_int64
ctypedef signed int npy_int96
ctypedef signed int npy_int128
ctypedef unsigned int npy_uint8
ctypedef unsigned int npy_uint16
......@@ -83,7 +111,6 @@ cdef extern from "numpy/arrayobject.h":
ctypedef unsigned int npy_uint64
ctypedef unsigned int npy_uint96
ctypedef unsigned int npy_uint128
ctypedef signed int npy_int64
ctypedef float npy_float32
ctypedef float npy_float64
......@@ -91,5 +118,40 @@ cdef extern from "numpy/arrayobject.h":
ctypedef float npy_float96
ctypedef float npy_float128
# Typedefs that matches the runtime dtype objects in
# the numpy module.
# The ones that are commented out needs an IFDEF function
# in Cython to enable them only on the right systems.
ctypedef npy_int8 int8_t
ctypedef npy_int16 int16_t
ctypedef npy_int32 int32_t
ctypedef npy_int64 int64_t
#ctypedef npy_int96 int96_t
#ctypedef npy_int128 int128_t
ctypedef npy_uint8 uint8_t
ctypedef npy_uint16 uint16_t
ctypedef npy_uint32 uint32_t
ctypedef npy_uint64 uint64_t
#ctypedef npy_uint96 uint96_t
#ctypedef npy_uint128 uint128_t
ctypedef npy_float32 float32_t
ctypedef npy_float64 float64_t
#ctypedef npy_float80 float80_t
#ctypedef npy_float128 float128_t
# The int types are mapped a bit surprising --
# numpy.int corresponds to 'l' and numpy.long to 'q'
ctypedef npy_long int_t
ctypedef npy_longlong long_t
ctypedef npy_ulong uint_t
ctypedef npy_ulonglong ulong_t
ctypedef npy_double float_t
ctypedef npy_double double_t
ctypedef npy_longdouble longdouble_t
ctypedef npy_int64 int64
......@@ -40,7 +40,7 @@ def file_newer_than(path, time):
ftime = modification_time(path)
return ftime > time
# support for source file encoding detection and unicode decoding
# support for source file encoding detection
def encode_filename(filename):
if isinstance(filename, unicode):
......@@ -77,90 +77,6 @@ def open_source_file(source_filename, mode="rU"):
encoding = detect_file_encoding(source_filename)
return codecs.open(source_filename, mode=mode, encoding=encoding)
class EncodedString(unicode):
# unicode string subclass to keep track of the original encoding.
# 'encoding' is None for unicode strings and the source encoding
# otherwise
encoding = None
def byteencode(self):
assert self.encoding is not None
return self.encode(self.encoding)
def utf8encode(self):
assert self.encoding is None
return self.encode("UTF-8")
def is_unicode(self):
return self.encoding is None
is_unicode = property(is_unicode)
# def __eq__(self, other):
# return unicode.__eq__(self, other) and \
# getattr(other, 'encoding', '') == self.encoding
char_from_escape_sequence = {
r'\a' : '\a',
r'\b' : '\b',
r'\f' : '\f',
r'\n' : '\n',
r'\r' : '\r',
r'\t' : '\t',
r'\v' : '\v',
}.get
def _to_escape_sequence(s):
if s in '\n\r\t':
return repr(s)[1:-1]
elif s == '"':
return r'\"'
else:
# within a character sequence, oct passes much better than hex
return ''.join(['\\%03o' % ord(c) for c in s])
_c_special = ('\0', '\n', '\r', '\t', '??', '"')
_c_special_replacements = zip(_c_special, map(_to_escape_sequence, _c_special))
def _build_specials_test():
subexps = []
for special in _c_special:
regexp = ''.join(['[%s]' % c for c in special])
subexps.append(regexp)
return re.compile('|'.join(subexps)).search
_has_specials = _build_specials_test()
def escape_character(c):
if c in '\n\r\t\\':
return repr(c)[1:-1]
elif c == "'":
return "\\'"
elif ord(c) < 32:
# hex works well for characters
return "\\x%02X" % ord(c)
else:
return c
def escape_byte_string(s):
s = s.replace('\\', '\\\\')
if _has_specials(s):
for special, replacement in _c_special_replacements:
s = s.replace(special, replacement)
try:
s.decode("ASCII")
return s
except UnicodeDecodeError:
pass
l = []
append = l.append
for c in s:
o = ord(c)
if o >= 128:
append('\\%3o' % o)
else:
append(c)
return ''.join(l)
def long_literal(value):
if isinstance(value, basestring):
if len(value) < 2:
......
......@@ -6,6 +6,7 @@ include setup.py
include bin/cython bin/update_references
include cython.py
include Cython/Compiler/Lexicon.pickle
include Cython/Includes/*.pxd
include Doc/*
include Demos/*.pyx
......@@ -16,9 +17,11 @@ include Demos/embed/*
include Demos/Setup.py
include Demos/Makefile*
include Tools/*
recursive-include Includes *
recursive-include tests *.pyx *.pxd *.pxi *.h *.BROKEN
include runtests.py
include Cython/Mac/Makefile
include Cython/Mac/_Filemodule_patched.c
recursive-include pyximport *.py
include pyximport/PKG-INFO pyximport/README
Metadata-Version: 1.0
Name: pyximport
Version: 1.0
Summary: Hooks to build and run Pyrex files as if they were simple Python files
Home-page: http://www.prescod.net/pyximport
Author: Paul Prescod
Author-email: paul@prescod.net
License: Python
Description: UNKNOWN
Keywords: pyrex import hook
Platform: UNKNOWN
== Pyximport ==
Download: pyx-import-1.0.tar.gz
<http://www.prescod.net/pyximport/pyximport-1.0.tar.gz>
Pyrex is a compiler. Therefore it is natural that people tend to go
through an edit/compile/test cycle with Pyrex modules. But my personal
opinion is that one of the deep insights in Python's implementation is
that a language can be compiled (Python modules are compiled to .pyc)
files and hide that compilation process from the end-user so that they
do not have to worry about it. Pyximport does this for Pyrex modules.
For instance if you write a Pyrex module called "foo.pyx", with
Pyximport you can import it in a regular Python module like this:
import pyximport; pyximport.install()
import foo
Doing so will result in the compilation of foo.pyx (with appropriate
exceptions if it has an error in it).
If you would always like to import pyrex files without building them
specially, you can also add the first line above to your sitecustomize.py.
That will install the hook every time you run Python. Then you can use
Pyrex modules just with simple import statements. I like to test my
Pyrex modules like this:
python -c "import foo"
== Dependency Handling ==
In Pyximport 1.1 it is possible to declare that your module depends on
multiple files, (likely ".h" and ".pxd" files). If your Pyrex module is
named "foo" and thus has the filename "foo.pyx" then you should make
another file in the same directory called "foo.pyxdep". The
"modname.pyxdep" file can be a list of filenames or "globs" (like
"*.pxd" or "include/*.h"). Each filename or glob must be on a separate
line. Pyximport will check the file date for each of those files before
deciding whether to rebuild the module. In order to keep track of the
fact that the dependency has been handled, Pyximport updates the
modification time of your ".pyx" source file. Future versions may do
something more sophisticated like informing distutils of the
dependencies directly.
== Limitations ==
Pyximport does not give you any control over how your Pyrex file is
compiled. Usually the defaults are fine. You might run into problems if
you wanted to write your program in half-C, half-Pyrex and build them
into a single library. Pyximport 1.2 will probably do this.
Pyximport does not hide the Distutils/GCC warnings and errors generated
by the import process. Arguably this will give you better feedback if
something went wrong and why. And if nothing went wrong it will give you
the warm fuzzy that pyximport really did rebuild your module as it was
supposed to.
== For further thought and discussion ==
I don't think that Python's "reload" will do anything for changed .SOs
on some (all?) platforms. It would require some (easy) experimentation
that I haven't gotten around to. But reload is rarely used in
applications outside of the Python interactive interpreter and certainly
not used much for C extension modules. Info about Windows
<http://mail.python.org/pipermail/python-list/2001-July/053798.html>
"setup.py install" does not modify sitecustomize.py for you. Should it?
Modifying Python's "standard interpreter" behaviour may be more than
most people expect of a package they install..
Pyximport puts your ".c" file beside your ".pyx" file (analogous to
".pyc" beside ".py"). But it puts the platform-specific binary in a
build directory as per normal for Distutils. If I could wave a magic
wand and get Pyrex or distutils or whoever to put the build directory I
might do it but not necessarily: having it at the top level is VERY
HELPFUL for debugging Pyrex problems.
from distutils.core import setup
import sys, os
from StringIO import StringIO
if "sdist" in sys.argv:
    # Building a source distribution: drop any stale MANIFEST so distutils
    # regenerates it, and regenerate README by converting index.html.
    try:
        os.remove("MANIFEST")
    except (IOError, OSError):
        pass
    import html2text
    out = StringIO()
    html2text.convert_files(open("index.html"), out)
    out.write("\n\n")
    open("README", "w").write(out.getvalue())
setup(
    name = "pyximport",
    fullname = "Pyrex Import Hooks",
    version = "1.0",
    description = "Hooks to build and run Pyrex files as if they were simple Python files",
    author = "Paul Prescod",
    author_email = "paul@prescod.net",
    url = "http://www.prescod.net/pyximport",
    license = "Python",
    keywords = "pyrex import hook",
#    scripts = ["pyxrun"],
#    data_files = [("examples/multi_file_extension",
#                   ["README", "ccode.c", "test.pyx", "test.pyxbld"]),
#                  ("examples/dependencies",
#                   ["README", "test.pyx", "test.pyxdep", "header.h",
#                    "header2.h", "header3.h", "header4.h"])
#                  ],
    py_modules = ["pyximport", "pyxbuild"])
from pyximport import *
"""Build a Pyrex file from .pyx source to .so loadable module using
the installed distutils infrastructure. Call:
out_fname = pyx_to_dll("foo.pyx")
"""
import os, md5
import distutils
from distutils.dist import Distribution
from distutils.errors import DistutilsArgError, DistutilsError, CCompilerError
from distutils.extension import Extension
from distutils.util import grok_environment_error
from Cython.Distutils import build_ext
import shutil
DEBUG = 0
def pyx_to_dll(filename, ext = None, force_rebuild = 0):
"""Compile a PYX file to a DLL and return the name of the generated .so
or .dll ."""
assert os.path.exists(filename)
path, name = os.path.split(filename)
if not ext:
modname, extension = os.path.splitext(name)
assert extension == ".pyx", extension
ext = Extension(name=modname, sources=[filename])
if DEBUG:
quiet = "--verbose"
else:
quiet = "--quiet"
args = [quiet, "build_ext"]
if force_rebuild:
args.append("--force")
dist = Distribution({"script_name": None, "script_args": args})
if not dist.ext_modules:
dist.ext_modules = []
dist.ext_modules.append(ext)
dist.cmdclass = {'build_ext': build_ext}
build = dist.get_command_obj('build')
build.build_base = os.path.join(path, "_pyxbld")
try:
ok = dist.parse_command_line()
except DistutilsArgError, msg:
raise
if DEBUG:
print "options (after parsing command line):"
dist.dump_option_dicts()
assert ok
try:
dist.run_commands()
return dist.get_command_obj("build_ext").get_outputs()[0]
except KeyboardInterrupt:
raise SystemExit, "interrupted"
except (IOError, os.error), exc:
error = grok_environment_error(exc)
if DEBUG:
sys.stderr.write(error + "\n")
raise
else:
raise SystemExit, error
except (DistutilsError,
CCompilerError), msg:
if DEBUG:
raise
else:
raise SystemExit, "error: " + str(msg)
if __name__=="__main__":
    # Smoke test: build dummy.pyx from the current directory, then run
    # the accompanying test module.
    pyx_to_dll("dummy.pyx")
    import test
"""
Import hooks; when installed (with the install() function), these hooks
allow importing .pyx files as if they were Python modules.
If you want the hook installed every time you run Python
you can add it to your Python version by adding these lines to
sitecustomize.py (which you can create from scratch in site-packages
if it doesn't exist there or somewhere else on your python path)
import pyximport
pyximport.install()
For instance on the Mac with Python 2.3 built from CVS, you could
create sitecustomize.py with only those two lines at
/usr/local/lib/python2.3/site-packages/sitecustomize.py .
Running this module as a top-level script will run a test and then print
the documentation.
This code was modeled on Quixote's ptl_import.
"""
import sys, os, shutil
import imp, ihooks, glob, md5
import __builtin__
import pyxbuild
from distutils.dep_util import newer
from distutils.extension import Extension
mod_name = "pyximport"
assert sys.hexversion >= 0x20000b1, "need Python 2.0b1 or later"
PYX_FILE_TYPE = 1011
PYX_EXT = ".pyx"
PYXDEP_EXT = ".pyxdep"
PYXBLD_EXT = ".pyxbld"
_test_files = []
class PyxHooks (ihooks.Hooks):
    """Hooks subclass that registers the .pyx suffix; install() wires it in."""
    def get_suffixes (self):
        # Extend the interpreter's standard suffix list with our entry.
        suffixes = list(imp.get_suffixes())
        suffixes.append((PYX_EXT, "r", PYX_FILE_TYPE))
        return suffixes
# Performance problem: for every PYX file that is imported, we will
# invoke the whole distutils infrastructure even if the module is
# already built. It might be more efficient to only do it when the
# mod time of the .pyx is newer than the mod time of the .so but
# the question is how to get distutils to tell me the name of the .so
# before it builds it. Maybe it is easy...but maybe the performance
# issue isn't real.
def _load_pyrex(name, filename):
    "Load a pyrex file given a name and filename."
    # NOTE(review): no implementation here -- this function currently does
    # nothing (returns None) and appears unused; actual loading is done by
    # load_module() below.  Confirm before removing.
def get_distutils_extension(modname, pyxfilename):
    """Return a distutils Extension describing how to build pyxfilename.

    A .pyxbld file next to the source (see handle_special_build) takes
    precedence; otherwise a plain Extension is built from the filename.
    """
    # Hash of the current source; kept for a (disabled) name-mangling
    # scheme -- see the commented-out line below.
    extra = "_" + md5.md5(open(pyxfilename).read()).hexdigest()
#    modname = modname + extra
    ext = handle_special_build(modname, pyxfilename)
    if not ext:
        ext = Extension(name=modname, sources=[pyxfilename])
    return ext
def handle_special_build(modname, pyxfilename):
    # Look for a "<module>.pyxbld" file next to the source.  If present it
    # must define make_ext(modname, pyxfilename) returning the Extension
    # to build; returns None when no special build file exists.
    special_build = os.path.splitext(pyxfilename)[0] + PYXBLD_EXT
    if not os.path.exists(special_build):
        ext = None
    else:
        globls = {}
        locs = {}
#        execfile(special_build, globls, locs)
#        ext = locs["make_ext"](modname, pyxfilename)
        mod = imp.load_source("XXXX", special_build, open(special_build))
        ext = mod.make_ext(modname, pyxfilename)
        assert ext and ext.sources, ("make_ext in %s did not return Extension"
                                     % special_build)
        # Source paths in the .pyxbld are relative to the .pyxbld itself.
        ext.sources = [os.path.join(os.path.dirname(special_build), source)
                       for source in ext.sources]
    return ext
def handle_dependencies(pyxfilename):
    """Force a rebuild of pyxfilename when any of its declared dependencies
    (listed one filename or glob per line in the matching .pyxdep file)
    is newer than the .pyx source."""
    dependfile = os.path.splitext(pyxfilename)[0] + PYXDEP_EXT
    # by default let distutils decide whether to rebuild on its own
    # (it has a better idea of what the output file will be)
    # but we know more about dependencies so force a rebuild if
    # some of the dependencies are newer than the pyxfile.
    if os.path.exists(dependfile):
        depends = open(dependfile).readlines()
        depends = [depend.strip() for depend in depends]
        # gather dependencies in the "files" variable
        # the dependency file is itself a dependency
        files = [dependfile]
        for depend in depends:
            # globs are resolved relative to the .pyxdep file's directory
            fullpath = os.path.join(os.path.dirname(dependfile),
                                    depend)
            files.extend(glob.glob(fullpath))
        # only for unit testing to see we did the right thing
        _test_files[:] = []
        # if any file that the pyxfile depends upon is newer than
        # the pyx file, 'touch' the pyx file so that distutils will
        # be tricked into rebuilding it.
        for file in files:
            if newer(file, pyxfilename):
                print "Rebuilding because of ", file
                filetime = os.path.getmtime(file)
                os.utime(pyxfilename, (filetime, filetime))
                _test_files.append(file)
def build_module(name, pyxfilename):
    """Compile pyxfilename into a shared library and return its path.

    Dependency handling may touch the source first (forcing a rebuild);
    stale files matching "<name>_*" beside the new library are removed.
    """
    assert os.path.exists(pyxfilename), (
        "Path does not exist: %s" % pyxfilename)
    handle_dependencies(pyxfilename)
    extension_mod = get_distutils_extension(name, pyxfilename)
    so_path = pyxbuild.pyx_to_dll(pyxfilename, extension_mod)
    assert os.path.exists(so_path), "Cannot find: %s" % so_path
    # Clean up leftover build products from earlier builds of this module.
    junkpath = os.path.join(os.path.dirname(so_path), name+"_*")
    junkstuff = glob.glob(junkpath)
    for path in junkstuff:
        if path!=so_path:
            try:
                os.remove(path)
            except IOError:
                print "Couldn't remove ", path
    return so_path
def load_module(name, pyxfilename):
    """Build pyxfilename (if necessary) and import the resulting module."""
    shared_lib = build_module(name, pyxfilename)
    module = imp.load_dynamic(name, shared_lib)
    assert module.__file__ == shared_lib, (module.__file__, shared_lib)
    return module
class PyxLoader (ihooks.ModuleLoader):
    """Load a module. It checks whether a file is a .pyx and returns it.
    Otherwise it lets the ihooks base class handle it. Let install()
    install it."""
    def load_module (self, name, stuff):
        # stuff is the ihooks triple: (file, filename, (suffix, mode, type)).
        # If it's a Pyrex file, load it specially.
        if stuff[2][2] == PYX_FILE_TYPE:
            file, pyxfilename, info = stuff
            (suff, mode, type) = info
            if file:
                # we rebuild from the path, not from the open file object
                file.close()
            return load_module(name, pyxfilename)
        else:
            # Otherwise, use the default handler for loading
            return ihooks.ModuleLoader.load_module( self, name, stuff)
# Optional C accelerator for the import machinery (from Quixote);
# its absence is handled gracefully by install() below.
try:
    import cimport
except ImportError:
    cimport = None
class cModuleImporter(ihooks.ModuleImporter):
    """This was just left in from the Quixote implementation. I think
    it allows a performance enhancement if you have the cimport module
    from Quixote. Let install() install it."""
    def __init__(self, loader=None):
        self.loader = loader or ihooks.ModuleLoader()
        # Route cimport's module lookups through our loader.
        cimport.set_loader(self.find_import_module)
    def find_import_module(self, fullname, subname, path):
        # Callback used by cimport: locate and load a single module.
        stuff = self.loader.find_module(subname, path)
        if not stuff:
            return None
        return self.loader.load_module(fullname, stuff)
    def install(self):
        # Save the current builtins (for a potential uninstall), then
        # replace the import machinery with cimport's implementation.
        self.save_import_module = __builtin__.__import__
        self.save_reload = __builtin__.reload
        if not hasattr(__builtin__, 'unload'):
            __builtin__.unload = None
        self.save_unload = __builtin__.unload
        __builtin__.__import__ = cimport.import_module
        __builtin__.reload = cimport.reload_module
        __builtin__.unload = self.unload
_installed = 0
def install():
    """Main entry point: install the .pyx import hook for this Python
    process.  To have it installed whenever you use Python, add the call
    to your sitecustomize (as described above).
    """
    global _installed
    if _installed:
        return
    loader = PyxLoader(PyxHooks())
    if cimport is None:
        importer = ihooks.ModuleImporter(loader)
    else:
        importer = cModuleImporter(loader)
    ihooks.install(importer)
    _installed = 1
def on_remove_file_error(func, path, excinfo):
    # shutil.rmtree error callback: report the failure and carry on
    # instead of aborting the whole tree removal.
    print "Sorry! Could not remove a temp file:", path
    print "Extra information."
    print func, excinfo
    print "You may want to delete this yourself when you get a chance."
def show_docs():
    """Run help() over this module's contents.

    The module is executing as __main__, so it (and every object it
    defines) is temporarily rebranded with the pyximport module name to
    make the help output read correctly.
    """
    import __main__
    __main__.__name__ = mod_name
    for name in dir(__main__):
        item = getattr(__main__, name)
        try:
            setattr(item, "__module__", mod_name)
        except (AttributeError, TypeError):
            pass
    help(__main__)
if __name__ == '__main__':
    # Running as a script just prints this module's documentation.
    show_docs()
import pyximport; pyximport.install()
import os, sys
import time, shutil
import tempfile
def make_tempdir():
    """Create (or recreate) a fresh scratch directory and return its path."""
    path = os.path.join(tempfile.gettempdir(), "pyrex_temp")
    if os.path.exists(path):
        # start from a clean slate
        remove_tempdir(path)
    os.mkdir(path)
    return path
def remove_tempdir(tempdir):
    # Best-effort recursive delete; failures are reported by the callback
    # rather than raised.
    shutil.rmtree(tempdir, 0, on_remove_file_error)
def on_remove_file_error(func, path, excinfo):
    # shutil.rmtree error callback: report the failure and carry on
    # instead of aborting the whole tree removal.
    print "Sorry! Could not remove a temp file:", path
    print "Extra information."
    print func, excinfo
    print "You may want to delete this yourself when you get a chance."
def test():
tempdir = make_tempdir()
sys.path.append(tempdir)
filename = os.path.join(tempdir, "dummy.pyx")
open(filename, "w").write("print 'Hello world from the Pyrex install hook'")
import dummy
reload(dummy)
depend_filename = os.path.join(tempdir, "dummy.pyxdep")
depend_file = open(depend_filename, "w")
depend_file.write("*.txt\nfoo.bar")
depend_file.close()
build_filename = os.path.join(tempdir, "dummy.pyxbld")
build_file = open(build_filename, "w")
build_file.write("""
from distutils.extension import Extension
def make_ext(name, filename):
return Extension(name=name, sources=[filename])
""")
build_file.close()
open(os.path.join(tempdir, "foo.bar"), "w").write(" ")
open(os.path.join(tempdir, "1.txt"), "w").write(" ")
open(os.path.join(tempdir, "abc.txt"), "w").write(" ")
reload(dummy)
assert len(pyximport._test_files)==1, pyximport._test_files
reload(dummy)
time.sleep(1) # sleep a second to get safer mtimes
open(os.path.join(tempdir, "abc.txt"), "w").write(" ")
print "Here goes the reolad"
reload(dummy)
assert len(pyximport._test_files) == 1, pyximport._test_files
reload(dummy)
assert len(pyximport._test_files) ==0, pyximport._test_files
remove_tempdir(tempdir)
if __name__=="__main__":
    # Run the pyximport end-to-end test when executed directly.
    test()
# reload seems to work for Python 2.3 but not 2.2.
import time, os, sys
import test_pyximport
# debugging the 2.2 problem
if 1:
    # Part of debugging the Python 2.2 reload problem noted above: try to
    # switch distutils' sysconfig into source-build mode (harmless if the
    # attribute is missing), then verify that pyxbuild sees the very same
    # sysconfig module object as this script.
    from distutils import sysconfig
    try:
        sysconfig.set_python_build()
    except AttributeError:
        pass
    import pyxbuild
    print pyxbuild.distutils.sysconfig == sysconfig
def test():
    """Check that reload() picks up a rewritten .pyx module."""
    work_dir = test_pyximport.make_tempdir()
    sys.path.append(work_dir)
    source_path = os.path.join(work_dir, "hello.pyx")
    open(source_path, "w").write("x = 1; print x; before = 'before'\n")
    import hello
    assert hello.x == 1
    # Make sure the rewritten source gets a strictly later timestamp than
    # the already-built object file.
    time.sleep(1)
    open(source_path, "w").write("x = 2; print x; after = 'after'\n")
    reload(hello)
    assert hello.x == 2, "Reload should work on Python 2.3 but not 2.2"
    test_pyximport.remove_tempdir(work_dir)
if __name__=="__main__":
    # Run the reload test when executed directly.
    test()
......@@ -12,6 +12,12 @@ distutils_distro = Distribution()
TEST_DIRS = ['compile', 'errors', 'run', 'pyregr']
TEST_RUN_DIRS = ['run', 'pyregr']
# Lists external modules, and a matcher matching tests
# which should be excluded if the module is not present.
EXT_DEP_MODULES = {
'numpy' : re.compile('.*\.numpy_.*').match
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
......@@ -45,11 +51,12 @@ class ErrorWriter(object):
return self._collect(True, True)
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, annotate,
def __init__(self, rootdir, workdir, selectors, exclude_selectors, annotate,
cleanup_workdir, cleanup_sharedlibs, with_pyregr, cythononly):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
......@@ -94,6 +101,9 @@ class TestBuilder(object):
if not [ 1 for match in self.selectors
if match(fqmodule) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors if match(fqmodule)]:
continue
if context in TEST_RUN_DIRS:
if module.startswith("test_"):
build_test = CythonUnitTestCase
......@@ -355,6 +365,23 @@ def collect_doctests(path, module_prefix, suite, selectors):
except ValueError: # no tests
pass
class MissingDependencyExcluder:
    """Callable selector that excludes tests whose external module
    dependencies cannot be imported on this system.

    deps maps module names to matcher functions; a matcher is retained
    only when importing its module fails.  Test names excluded this way
    are collected in tests_missing_deps for reporting at exit.
    """
    def __init__(self, deps):
        # deps: { module name : matcher func }
        self.exclude_matchers = []
        for module_name, matcher in deps.items():
            try:
                __import__(module_name)
            except ImportError:
                # dependency absent -> its tests must be skipped
                self.exclude_matchers.append(matcher)
        self.tests_missing_deps = []
    def __call__(self, testname):
        for matcher in self.exclude_matchers:
            if not matcher(testname):
                continue
            self.tests_missing_deps.append(testname)
            return True
        return False
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
......@@ -443,6 +470,12 @@ if __name__ == '__main__':
if not selectors:
selectors = [ lambda x:True ]
# Check which external modules are not present and exclude tests
# which depend on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
exclude_selectors = [missing_dep_excluder] # want to pring msg at exit
test_suite = unittest.TestSuite()
if options.unittests:
......@@ -452,15 +485,17 @@ if __name__ == '__main__':
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors)
if options.filetests:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors,
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options.annotate_source, options.cleanup_workdir,
options.cleanup_sharedlibs, options.pyregr, options.cythononly)
options.cleanup_sharedlibs, options.pyregr,
options.cythononly)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors,
options.annotate_source, options.cleanup_workdir,
options.cleanup_sharedlibs, True)
options.cleanup_sharedlibs, True,
options.cythononly)
test_suite.addTest(
filetests.handle_directory(
os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test'),
......@@ -476,3 +511,8 @@ if __name__ == '__main__':
name.startswith('Cython.Compiler.') and
name[len('Cython.Compiler.'):] not in ignored_modules ]
coverage.report(modules, show_missing=0)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
......@@ -98,6 +98,9 @@ setup(
'Cython.Tests',
'Cython.Compiler.Tests',
],
# pyximport
py_modules = ["pyximport/pyximport", "pyximport/pyxbuild"],
**setup_args
)
#cython: boundscheck=False
# cython: boundscheck = False
# cython: ignoreme = OK
# This testcase is most useful if you inspect the generated C file
print 3
cimport python_dict as asadf, python_exc, cython as cy
def e(object[int, ndim=2] buf):
print buf[3, 2] # no bc
@cy.boundscheck(False)
def f(object[int, 2] buf):
print buf[3, 2]
def f(object[int, ndim=2] buf):
print buf[3, 2] # no bc
@cy.boundscheck(True)
def g(object[int, 2] buf):
# Please leave this comment,
#cython: this should have no special meaning
def g(object[int, ndim=2] buf):
# The below line should have no meaning
# cython: boundscheck = False
# even if the above line doesn't follow indentation.
print buf[3, 2]
print buf[3, 2] # bc
def h(object[int, 2] buf):
print buf[3, 2]
def h(object[int, ndim=2] buf):
print buf[3, 2] # no bc
with cy.boundscheck(True):
print buf[3,2]
print buf[3,2] # bc
from cython cimport boundscheck as bc
def i(object[int] buf):
with bc(True):
print buf[3]
print buf[3] # bs
cdef extern from "Python.h":
ctypedef struct PyTypeObject:
pass
ctypedef struct PyObject:
Py_ssize_t ob_refcnt
PyTypeObject *ob_type
cdef extern from "longintrepr.h":
cdef struct _longobject:
int ob_refcnt
PyTypeObject *ob_type
int ob_size
unsigned int *ob_digit
def test(temp = long(0)):
    # Reinterpret a Python long through CPython's internal _longobject
    # layout (declared above from longintrepr.h) and print the sizes of
    # its ob_size and digit fields.
    cdef _longobject *l
    l = <_longobject *> temp
    print sizeof(l.ob_size)
    print sizeof(l.ob_digit[0])
# cython: nonexistant = True
# cython: boundscheck = true
# cython: boundscheck = 9
print 3
# Options should not be interpreted any longer:
# cython: boundscheck = true
_ERRORS = u"""
3:0: boundscheck directive must be set to True or False
4:0: boundscheck directive must be set to True or False
"""
#cython: nonexistant
#cython: some=9
# The one below should NOT raise an error
#cython: boundscheck=True
# However this one should
#cython: boundscheck=sadf
print 3
#cython: boundscheck=True
_ERRORS = u"""
2:0: Expected "=" in option "nonexistant"
3:0: Unknown option: "some"
10:0: Must pass a boolean value for option "boundscheck"
"""
/* See bufaccess.pyx */
typedef short htypedef_short;
typedef short td_h_short;
typedef double td_h_double;
typedef unsigned short td_h_ushort;
This diff is collapsed.
# coding: ASCII
__doc__ = u"""
>>> s = test()
>>> assert s == ''.join([chr(i) for i in range(0x10,0xFF,0x11)] + [chr(0xFF)]), repr(s)
"""
def test():
    # Fill a C char array with escaped byte literals (c'\xNN') spanning
    # the 8-bit range, NUL-terminate it, and return it; per the doctest
    # above, the char* is converted to a Python string on return.
    cdef char s[17]
    s[ 0] = c'\x10'
    s[ 1] = c'\x21'
    s[ 2] = c'\x32'
    s[ 3] = c'\x43'
    s[ 4] = c'\x54'
    s[ 5] = c'\x65'
    s[ 6] = c'\x76'
    s[ 7] = c'\x87'
    s[ 8] = c'\x98'
    s[ 9] = c'\xA9'
    s[10] = c'\xBA'
    s[11] = c'\xCB'
    s[12] = c'\xDC'
    s[13] = c'\xED'
    s[14] = c'\xFE'
    s[15] = c'\xFF'
    s[16] = c'\x00'
    return s
__doc__ = u"""
>>> s = test()
>>> assert s == ''.join([chr(i) for i in range(1,49)]), s
>>> assert s == ''.join([chr(i) for i in range(1,49)]), repr(s)
"""
def test():
......
cdef class Spam:
cdef public Spam e
......@@ -7,7 +7,21 @@ __doc__ = """
>>> s.e is s
True
>>> s.e = None
>>> s = Bot()
>>> s.e = s
>>> s.e = 1
Traceback (most recent call last):
TypeError: Cannot convert int to extmember.Bot
>>> s.e is s
True
>>> s.e = None
"""
# declared in the pxd
cdef class Spam:
cdef public Spam e
pass
# not declared in the pxd
cdef class Bot:
cdef public Bot e
......@@ -7,6 +7,22 @@ __doc__ = u"""
>>> h(56, 7)
105.0
>>> arrays()
19
>>> attributes()
26 26 26
>>> smoketest()
10
>>> test_side_effects()
side effect 1
c side effect 2
side effect 3
c side effect 4
([0, 11, 102, 3, 4], [0, 1, 2, 13, 104])
"""
def f(a,b):
......@@ -26,3 +42,71 @@ def h(double a, double b):
a += b
a *= b
return a
cimport stdlib
def arrays():
    # In-place arithmetic (+=, *=, -=) on raw C array elements, indexed by a
    # literal, a C int, and a Python object; doctest expects 19.
    cdef char* buf = <char*>stdlib.malloc(10)
    cdef int i = 2
    cdef object j = 2
    buf[2] = 0
    buf[i] += 2
    buf[2] *= 10
    buf[j] -= 1
    print buf[2]  # 0 -> +2 -> *10 -> -1 == 19
    stdlib.free(buf)
cdef class A:
    # Extension type used by attributes()/smoketest(): one Python-object
    # attribute, one C int attribute, and a raw char* buffer (set by caller).
    cdef attr
    cdef int attr2
    cdef char* buf
    def __init__(self):
        self.attr = 3
        self.attr2 = 3
class B:
    # Plain Python counterpart to cdef class A: a single class attribute.
    attr = 3
def attributes():
    # In-place ops on cdef-class attributes (object and C int) and on a plain
    # Python instance attribute; each starts at 3, so (3+10)*2 == 26 for all.
    cdef A a = A()
    b = B()
    a.attr += 10
    a.attr *= 2
    a.attr2 += 10
    a.attr2 *= 2
    b.attr += 10
    b.attr *= 2
    print a.attr, a.attr2, b.attr
def get_2(): return 2
cdef int identity(int value): return value
def smoketest():
    # In-place += through pointer arithmetic and call-based indices; the
    # index math (4 - 3 + 0) reduces to a.buf[1] += 10, so 10 is printed.
    cdef char* buf = <char*>stdlib.malloc(10)
    cdef A a = A()
    a.buf = buf
    a.buf[identity(1)] = 0
    (a.buf + identity(4) - <int>(2*get_2() - 1))[get_2() - 2*identity(1)] += 10
    print a.buf[1]
    stdlib.free(buf)
def side_effect(x):
    # Python-level helper: prints a marker, then returns x unchanged, so the
    # caller can observe how many times an index expression is evaluated.
    print "side effect", x
    return x
cdef int c_side_effect(int x):
    # C-level variant of side_effect: prints a marker, returns x unchanged.
    print "c side effect", x
    return x
def test_side_effects():
    # Index expressions with side effects should be evaluated exactly once
    # per in-place op, for both Python lists and C arrays; the doctest
    # checks the four "side effect" prints appear once each, in order.
    a = range(5)
    a[side_effect(1)] += 10
    a[c_side_effect(2)] += 100
    cdef int i
    cdef int b[5]
    for i from 0 <= i < 5:
        b[i] = i
    b[side_effect(3)] += 10
    b[c_side_effect(4)] += 100
    return a, [b[i] for i from 0 <= i < 5]
# cannot be named "numpy" in order to not clash with the numpy module!
cimport numpy as np
try:
import numpy as np
__doc__ = """
>>> basic()
[[0 1 2 3 4]
[5 6 7 8 9]]
2 0 9 5
>>> three_dim()
[[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
<_BLANKLINE_>
[[ 8. 9. 10. 11.]
[ 12. 13. 14. 15.]]
<_BLANKLINE_>
[[ 16. 17. 18. 19.]
[ 20. 21. 22. 23.]]]
6.0 0.0 13.0 8.0
>>> obj_array()
[a 1 {}]
a 1 {}
Test various forms of slicing, picking etc.
>>> a = np.arange(10, dtype='l').reshape(2, 5)
>>> print_long_2d(a)
0 1 2 3 4
5 6 7 8 9
>>> print_long_2d(a[::-1, ::-1])
9 8 7 6 5
4 3 2 1 0
>>> print_long_2d(a[1:2, 1:3])
6 7
>>> print_long_2d(a[::2, ::2])
0 2 4
>>> print_long_2d(a[::4, :])
0 1 2 3 4
>>> print_long_2d(a[:, 1:5:2])
1 3
6 8
>>> print_long_2d(a[:, 5:1:-2])
4 2
9 7
>>> print_long_2d(a[:, [3, 1]])
3 1
8 6
>>> print_long_2d(a.T)
0 5
1 6
2 7
3 8
4 9
Write to slices
>>> b = a.copy()
>>> put_range_long_1d(b[:, 3])
>>> print b
[[0 1 2 0 4]
[5 6 7 1 9]]
>>> put_range_long_1d(b[::-1, 3])
>>> print b
[[0 1 2 1 4]
[5 6 7 0 9]]
>>> a = np.zeros(9, dtype='l')
>>> put_range_long_1d(a[1::3])
>>> print a
[0 0 0 0 1 0 0 2 0]
Write to picked subarrays. This should NOT change the original
array as picking creates a new mutable copy.
>>> a = np.zeros(10, dtype='l').reshape(2, 5)
>>> put_range_long_1d(a[[0, 0, 1, 1, 0], [0, 1, 2, 4, 3]])
>>> print a
[[0 0 0 0 0]
[0 0 0 0 0]]
>>> test_dtype('b', inc1_byte)
>>> test_dtype('B', inc1_ubyte)
>>> test_dtype('h', inc1_short)
>>> test_dtype('H', inc1_ushort)
>>> test_dtype('i', inc1_int)
>>> test_dtype('I', inc1_uint)
>>> test_dtype('l', inc1_long)
>>> test_dtype('L', inc1_ulong)
>>> test_dtype('f', inc1_float)
>>> test_dtype('d', inc1_double)
>>> test_dtype('g', inc1_longdouble)
>>> test_dtype('O', inc1_object)
>>> test_dtype(np.int, inc1_int_t)
>>> test_dtype(np.long, inc1_long_t)
>>> test_dtype(np.float, inc1_float_t)
>>> test_dtype(np.double, inc1_double_t)
>>> test_dtype(np.longdouble, inc1_longdouble_t)
>>> test_dtype(np.int32, inc1_int32_t)
>>> test_dtype(np.float64, inc1_float64_t)
Unsupported types:
>>> test_dtype(np.complex, inc1_byte)
Traceback (most recent call last):
...
ValueError: only objects, int and float dtypes supported for ndarray buffer access so far (dtype is 15)
>>> a = np.zeros((10,), dtype=np.dtype('i4,i4'))
>>> inc1_byte(a)
Traceback (most recent call last):
...
ValueError: only objects, int and float dtypes supported for ndarray buffer access so far (dtype is 20)
"""
except:
__doc__ = ""
def ndarray_str(arr):
    """Return str(arr) with blank lines replaced by <_BLANKLINE_> markers.

    Doctest before Py2.4 does not support <BLANKLINE>, so blank lines in a
    multi-block array repr are substituted with a literal marker instead.
    """
    rendered = str(arr)
    return rendered.replace('\n\n', '\n<_BLANKLINE_>\n')
def basic():
    # 2D int buffer access: print the whole array, then four elements.
    cdef object[int, ndim=2] buf = np.arange(10, dtype='i').reshape((2, 5))
    print buf
    print buf[0, 2], buf[0, 0], buf[1, 4], buf[1, 0]
def three_dim():
    # 3D double buffer access; printed through ndarray_str so the doctest
    # can match blank lines between blocks.
    cdef object[double, ndim=3] buf = np.arange(24, dtype='d').reshape((3,2,4))
    print ndarray_str(buf)
    print buf[0, 1, 2], buf[0, 0, 0], buf[1, 1, 1], buf[1, 0, 0]
def obj_array():
    # 1D object-dtype buffer access (heterogeneous elements).
    cdef object[object, ndim=1] buf = np.array(["a", 1, {}])
    print buf
    print buf[0], buf[1], buf[2]
def print_long_2d(np.ndarray[long, ndim=2] arr):
    # Print a 2D long array one row per line, space-separated; used by the
    # doctests to verify slicing/striding/transposes read correctly.
    cdef int i, j
    for i in range(arr.shape[0]):
        print " ".join([str(arr[i, j]) for j in range(arr.shape[1])])
def put_range_long_1d(np.ndarray[long] arr):
    """Write 0, 1, 2, ... into arr in place (mutates arr; returns nothing)."""
    cdef int value = 0, i
    for i in range(arr.shape[0]):
        arr[i] = value
        value += 1
# Exhaustive dtype tests -- increments element [1] by 1 for all dtypes
# (each is driven through test_dtype(), which checks 10 became 11)
def inc1_byte(np.ndarray[char] arr): arr[1] += 1
def inc1_ubyte(np.ndarray[unsigned char] arr): arr[1] += 1
def inc1_short(np.ndarray[short] arr): arr[1] += 1
def inc1_ushort(np.ndarray[unsigned short] arr): arr[1] += 1
def inc1_int(np.ndarray[int] arr): arr[1] += 1
def inc1_uint(np.ndarray[unsigned int] arr): arr[1] += 1
def inc1_long(np.ndarray[long] arr): arr[1] += 1
def inc1_ulong(np.ndarray[unsigned long] arr): arr[1] += 1
def inc1_longlong(np.ndarray[long long] arr): arr[1] += 1
def inc1_ulonglong(np.ndarray[unsigned long long] arr): arr[1] += 1
def inc1_float(np.ndarray[float] arr): arr[1] += 1
def inc1_double(np.ndarray[double] arr): arr[1] += 1
def inc1_longdouble(np.ndarray[long double] arr): arr[1] += 1
def inc1_object(np.ndarray[object] arr):
    o = arr[1]
    o += 1
    arr[1] = o # unfortunately, += segfaults for objects
# np.*_t typedef variants of the same increment test
def inc1_int_t(np.ndarray[np.int_t] arr): arr[1] += 1
def inc1_long_t(np.ndarray[np.long_t] arr): arr[1] += 1
def inc1_float_t(np.ndarray[np.float_t] arr): arr[1] += 1
def inc1_double_t(np.ndarray[np.double_t] arr): arr[1] += 1
def inc1_longdouble_t(np.ndarray[np.longdouble_t] arr): arr[1] += 1
# The tests below only work on platforms that has the given types
def inc1_int32_t(np.ndarray[np.int32_t] arr): arr[1] += 1
def inc1_float64_t(np.ndarray[np.float64_t] arr): arr[1] += 1
def test_dtype(dtype, inc1):
    # Build [0, 10] with the given dtype, apply the matching inc1_* function,
    # and report failure unless element [1] became 11.
    a = np.array([0, 10], dtype=dtype)
    inc1(a)
    if a[1] != 11: print "failed!"
......@@ -3,12 +3,15 @@ __doc__ = u"""
<BLANKLINE>
1
1 test
1 test
1 test 42 spam
"""
def f(a, b):
    # Exercise every form of the Python 2 print statement; a trailing comma
    # suppresses the newline, so consecutive prints join on one output line.
    print
    print a
    print a,
    print b
    print a, b
    print a, b,
    print 42, u"spam"
# cannot be named "numpy" in order to not clash with the numpy module!
cimport numpy
try:
import numpy
__doc__ = """
>>> basic()
[[0 1 2 3 4]
[5 6 7 8 9]]
2 0 9 5
"""
except:
__doc__ = ""
def basic():
    # NOTE(review): ndim is passed positionally here ([int, 2]); the buffer
    # option handling in this commit allows only one positional option
    # (dtype), so this likely exercises an error path -- confirm intent.
    cdef object[int, 2] buf = numpy.arange(10).reshape((2, 5))
    print buf
    print buf[0, 2], buf[0, 0], buf[1, 4], buf[1, 0]
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment