Commit 4f237620 authored by Unknown

Trivial typo fixes

Most are non-user facing. 
Found using:
`codespell -d -q 3`
parent 01e9ff88
@@ -363,7 +363,7 @@ class CodeWriter(DeclarationWriter):
         self.dedent()
 
     def visit_IfStatNode(self, node):
-        # The IfClauseNode is handled directly without a seperate match
+        # The IfClauseNode is handled directly without a separate match
         # for clariy.
         self.startline(u"if ")
         self.visit(node.if_clauses[0].condition)
......
@@ -326,7 +326,7 @@ def put_acquire_arg_buffer(entry, code, pos):
     code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
     code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
     code.putln("}")
-    # An exception raised in arg parsing cannot be catched, so no
+    # An exception raised in arg parsing cannot be caught, so no
     # need to care about the buffer then.
     put_unpack_buffer_aux_into_scope(entry, code)
@@ -617,7 +617,7 @@ class GetAndReleaseBufferUtilityCode(object):
 
 def mangle_dtype_name(dtype):
-    # Use prefixes to seperate user defined types from builtins
+    # Use prefixes to separate user defined types from builtins
     # (consider "typedef float unsigned_int")
     if dtype.is_pyobject:
         return "object"
@@ -636,7 +636,7 @@ def get_type_information_cname(code, dtype, maxdepth=None):
     and return the name of the type info struct.
 
     Structs with two floats of the same size are encoded as complex numbers.
-    One can seperate between complex numbers declared as struct or with native
+    One can separate between complex numbers declared as struct or with native
     encoding by inspecting to see if the fields field of the type is
     filled in.
     """
......
@@ -12,7 +12,7 @@ class ExtractPxdCode(VisitorTransform):
     The result is a tuple (StatListNode, ModuleScope), i.e.
     everything that is needed from the pxd after it is processed.
 
-    A purer approach would be to seperately compile the pxd code,
+    A purer approach would be to separately compile the pxd code,
     but the result would have to be slightly more sophisticated
     than pure strings (functions + wanted interned strings +
     wanted utility code + wanted cached objects) so for now this
......
@@ -4185,7 +4185,7 @@ class BufferIndexNode(_IndexingBaseNode):
         if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
             obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
             # We have got to do this because we have to declare pythran objects
-            # at the beggining of the functions.
+            # at the beginning of the functions.
             # Indeed, Cython uses "goto" statement for error management, and
             # RAII doesn't work with that kind of construction.
             # Moreover, the way Pythran expressions are made is that they don't
@@ -10427,7 +10427,7 @@ class CythonArrayNode(ExprNode):
 
     def allocate_temp_result(self, code):
         if self.temp_code:
-            raise RuntimeError("temp allocated mulitple times")
+            raise RuntimeError("temp allocated multiple times")
 
         self.temp_code = code.funcstate.allocate_temp(self.type, True)
......
@@ -225,7 +225,7 @@ class FusedCFuncDefNode(StatListNode):
         """
         Create a new local scope for the copied node and append it to
         self.nodes. A new local scope is needed because the arguments with the
-        fused types are aready in the local scope, and we need the specialized
+        fused types are already in the local scope, and we need the specialized
         entries created after analyse_declarations on each specialized version
         of the (CFunc)DefNode.
         f2s is a dict mapping each fused type to its specialized version
@@ -438,7 +438,7 @@ class FusedCFuncDefNode(StatListNode):
         if dtype is not None:
             itemsize = dtype.itemsize
             kind = ord(dtype.kind)
-            # We only support the endianess of the current compiler
+            # We only support the endianness of the current compiler
             byteorder = dtype.byteorder
             if byteorder == "<" and not __Pyx_Is_Little_Endian():
                 arg_is_pythran_compatible = False
......
@@ -356,7 +356,7 @@ class Context(object):
                 from ..Parser import ConcreteSyntaxTree
             except ImportError:
                 raise RuntimeError(
-                    "Formal grammer can only be used with compiled Cython with an available pgen.")
+                    "Formal grammar can only be used with compiled Cython with an available pgen.")
             ConcreteSyntaxTree.p_module(source_filename)
         except UnicodeDecodeError as e:
             #import traceback
@@ -683,7 +683,7 @@ def compile(source, options = None, full_module_name = None, **kwds):
     compile(source [, options], [, <option> = <value>]...)
 
     Compile one or more Pyrex implementation files, with optional timestamp
-    checking and recursing on dependecies. The source argument may be a string
+    checking and recursing on dependencies. The source argument may be a string
     or a sequence of strings If it is a string and no recursion or timestamp
     checking is requested, a CompilationResult is returned, otherwise a
     CompilationResultSet is returned.
......
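As a usage sketch of the API this docstring documents (the filename is illustrative, and the import path assumes the module layout at the time of this commit):

```cython
# Hedged sketch: compiling one source with the compile() described above.
from Cython.Compiler.Main import compile

result = compile("example.pyx")   # no recursion/timestamps -> CompilationResult
print(result.c_file)              # path of the generated C file
```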
@@ -1927,7 +1927,7 @@ class FuncDefNode(StatNode, BlockNode):
                     code.put_var_incref(entry)
 
             # Note: defaults are always incref-ed. For def functions, we
-            # we aquire arguments from object converstion, so we have
+            # we acquire arguments from object converstion, so we have
             # new references. If we are a cdef function, we need to
             # incref our arguments
             elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
@@ -4234,7 +4234,7 @@ class GeneratorBodyDefNode(DefNode):
 
 class OverrideCheckNode(StatNode):
     # A Node for dispatching to the def method if it
-    # is overriden.
+    # is overridden.
     #
     #  py_func
     #
......
@@ -1877,7 +1877,7 @@ if VALUE is not None:
     def visit_FuncDefNode(self, node):
         """
-        Analyse a function and its body, as that hasn't happend yet. Also
+        Analyse a function and its body, as that hasn't happened yet. Also
         analyse the directive_locals set by @cython.locals().
 
         Then, if we are a function with fused arguments, replace the function
@@ -2066,7 +2066,7 @@ if VALUE is not None:
     # Some nodes are no longer needed after declaration
     # analysis and can be dropped. The analysis was performed
-    # on these nodes in a seperate recursive process from the
+    # on these nodes in a separate recursive process from the
     # enclosing function or module, so we can simply drop them.
 
     def visit_CDeclaratorNode(self, node):
         # necessary to ensure that all CNameDeclaratorNodes are visited.
......
@@ -168,7 +168,7 @@ class SourceDescriptor(object):
         if self._escaped_description is None:
             esc_desc = \
                 self.get_description().encode('ASCII', 'replace').decode("ASCII")
-            # Use foreward slashes on Windows since these paths
+            # Use forward slashes on Windows since these paths
             # will be used in the #line directives in the C/C++ files.
             self._escaped_description = esc_desc.replace('\\', '/')
         return self._escaped_description
......
 #
 # Nodes used as utilities and support for transforms etc.
 # These often make up sets including both Nodes and ExprNodes
-# so it is convenient to have them in a seperate module.
+# so it is convenient to have them in a separate module.
 #
 
 from __future__ import absolute_import
......
@@ -2596,7 +2596,7 @@ class PythonCodeExecutor(object):
         inferior.
 
         Of course, executing any code in the inferior may be dangerous and may
-        leave the debuggee in an unsafe state or terminate it alltogether.
+        leave the debuggee in an unsafe state or terminate it altogether.
         """
         if '\0' in code:
             raise gdb.GdbError("String contains NUL byte.")
......
@@ -213,7 +213,7 @@ cdef extern from "Python.h":
     object PyList_AsTuple (object)
     int PyList_Check (object)       # Always succeeds.
     int PyList_CheckExact (object)  # Always succeeds.
-    int PyList_GET_SIZE (object)    # Always suceeds.
+    int PyList_GET_SIZE (object)    # Always succeeds.
     object PyList_GetSlice (object, Py_ssize_t, Py_ssize_t)
     int PyList_Insert (object, Py_ssize_t, object) except -1
     object PyList_New (Py_ssize_t)
......
@@ -10,13 +10,13 @@
 # Read http://docs.python.org/api/refcounts.html which is so
 # important I've copied it below.
 #
-# For all the declaration below, whenver the Py_ function returns
+# For all the declaration below, whenever the Py_ function returns
 # a *new reference* to a PyObject*, the return type is "object".
 # When the function returns a borrowed reference, the return
 # type is PyObject*. When Cython sees "object" as a return type
 # it doesn't increment the reference count. When it sees PyObject*
 # in order to use the result you must explicitly cast to <object>,
-# and when you do that Cython increments the reference count wether
+# and when you do that Cython increments the reference count whether
 # you want it to or not, forcing you to an explicit DECREF (or leak memory).
 # To avoid this we make the above convention. Note, you can
 # always locally override this convention by putting something like
......
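The convention this comment block describes can be illustrated with two real PyList calls (a minimal sketch, not part of the diff):

```cython
from cpython.ref cimport PyObject

cdef extern from "Python.h":
    # Returns a *new* reference, so it is declared as "object":
    # Cython takes ownership and DECREFs it automatically.
    object PyList_GetSlice(object, Py_ssize_t, Py_ssize_t)
    # Returns a *borrowed* reference, so it is declared as PyObject*:
    # casting the result with <object> INCREFs it, as the comment explains.
    PyObject* PyList_GET_ITEM(object, Py_ssize_t)
```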
@@ -92,7 +92,7 @@ cdef extern from *:  # Hard-coded utility code hack.
 
     def __getbuffer__(self, Py_buffer* info, int flags):
         # This implementation of getbuffer is geared towards Cython
-        # requirements, and does not yet fullfill the PEP.
+        # requirements, and does not yet fulfill the PEP.
         # In particular strided access is always provided regardless
         # of flags
         item_count = Py_SIZE(self)
@@ -143,7 +143,7 @@ cdef inline array copy(array self):
     return op
 
 cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1:
-    """ efficent appending of new stuff of same type
+    """ efficient appending of new stuff of same type
     (e.g. of same array type)
     n: number of elements (not number of bytes!) """
     cdef Py_ssize_t itemsize = self.ob_descr.itemsize
......
@@ -213,7 +213,7 @@ cdef extern from "numpy/arrayobject.h":
 
         # -- the details of this may change.
         def __getbuffer__(ndarray self, Py_buffer* info, int flags):
            # This implementation of getbuffer is geared towards Cython
-            # requirements, and does not yet fullfill the PEP.
+            # requirements, and does not yet fulfill the PEP.
            # In particular strided access is always provided regardless
            # of flags
......
@@ -127,7 +127,7 @@ arglist: argument (',' argument)* [',']
 # to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
 # we explicitly match '*' here, too, to give it proper precedence.
 # Illegal combinations and orderings are blocked in ast.c:
-# multiple (test comp_for) arguements are blocked; keyword unpackings
+# multiple (test comp_for) arguments are blocked; keyword unpackings
 # that precede iterable unpackings are blocked; etc.
 argument: ( test [comp_for] |
             test '=' test |
......
@@ -4,7 +4,7 @@ class TestCodeWriter(CythonTest):
     # CythonTest uses the CodeWriter heavily, so do some checking by
     # roundtripping Cython code through the test framework.
-    # Note that this test is dependant upon the normal Cython parser
+    # Note that this test is dependent upon the normal Cython parser
     # to generate the input trees to the CodeWriter. This save *a lot*
     # of time; better to spend that time writing other tests than perfecting
     # this one...
......
@@ -1340,7 +1340,7 @@ cdef void broadcast_leading({{memviewslice_name}} *mslice,
         mslice.suboffsets[i] = -1
 
 #
-### Take care of refcounting the objects in slices. Do this seperately from any copying,
+### Take care of refcounting the objects in slices. Do this separately from any copying,
 ### to minimize acquiring the GIL
 #
......
 /*
 These functions provide integer arithmetic with integer checking. They do not
 actually raise an exception when an overflow is detected, but rather set a bit
-in the overflow parameter. (This parameter may be re-used accross several
+in the overflow parameter. (This parameter may be re-used across several
 arithmetic operations, so should be or-ed rather than assigned to.)
 
 The implementation is divided into two parts, the signed and unsigned basecases,
......
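The "or-ed overflow bit" pattern this comment describes looks roughly like the following in use; the helper names here are hypothetical stand-ins for the generated checking functions:

```cython
cdef int compute(int a, int b, int c) except? -1:
    cdef int overflow = 0
    # Each checked operation ORs into the same flag rather than assigning,
    # so a single test at the end covers the whole chain.
    cdef int tmp = __pyx_add_int_checking(a, b, &overflow)    # hypothetical name
    cdef int res = __pyx_mul_int_checking(tmp, c, &overflow)  # hypothetical name
    if overflow:
        raise OverflowError("integer arithmetic overflowed")
    return res
```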
@@ -17,7 +17,7 @@ DESCRIPTION
 with one or more Cython modules built in. This allows one to create a single
 executable from Cython code, without having to have separate shared objects
 for each Cython module. A major advantage of this approach is that it allows
-debuging with gprof(1), which does not work with shared objects.
+debugging with gprof(1), which does not work with shared objects.
 
 Unless ``-p`` is given, the first module's ``__name__`` is set to
 ``"__main__"`` and is imported on startup; if ``-p`` is given, a normal Python
......
@@ -48,7 +48,7 @@ def pyx_library(
          ["-s '%s=%s'" % x for x in cython_options])
     # TODO(robertwb): It might be better to only generate the C files,
     # letting cc_library (or similar) handle the rest, but there isn't yet
-    # suport compiling Python C extensions from bazel.
+    # support compiling Python C extensions from bazel.
     native.genrule(
         name = name + "_cythonize",
         srcs = pyx_srcs,
......
@@ -116,7 +116,7 @@ def pyext_coms(platform):
     return pyext_cccom, pyext_cxxcom, pyext_linkcom
 
 def set_basic_vars(env):
-    # Set construction variables which are independant on whether we are using
+    # Set construction variables which are independent on whether we are using
     # distutils or not.
     env['PYEXTCPPPATH'] = SCons.Util.CLVar('$PYEXTINCPATH')
......
@@ -4,7 +4,7 @@ environment:
   global:
     # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
-    # /E:ON and /V:ON options are not enabled in the batch script intepreter
+    # /E:ON and /V:ON options are not enabled in the batch script interpreter
     # See: http://stackoverflow.com/a/13751649/163740
     WITH_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
......
@@ -273,7 +273,7 @@ attribute access, you could just implement some properties::
 Cython initializes C++ class attributes of a cdef class using the nullary constructor.
 If the class you're wrapping does not have a nullary constructor, you must store a pointer
 to the wrapped class and manually allocate and deallocate it.
-A convienient and safe place to do so is in the `__cinit__` and `__dealloc__` methods
+A convenient and safe place to do so is in the `__cinit__` and `__dealloc__` methods
 which are guaranteed to be called exactly once upon creation and deletion of the Python
 instance.
@@ -645,7 +645,7 @@ e.g.::
     cdef vector[int] v = ...
     it = v.begin()
 
-(Though of course the ``for .. in`` syntax is prefered for objects supporting
+(Though of course the ``for .. in`` syntax is preferred for objects supporting
 the iteration protocol.)
 
 RTTI and typeid()
......
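For comparison, the preferred ``for .. in`` form reads like this (a minimal sketch; requires compiling in C++ mode):

```cython
# distutils: language = c++
from libcpp.vector cimport vector

def total(vector[int] v):
    cdef int x
    cdef int s = 0
    for x in v:   # Cython expands this to begin()/end() iterator traversal
        s += x
    return s
```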
@@ -502,11 +502,11 @@ def install(pyximport=True, pyimport=False, build_dir=None, build_in_temp=True,
     ``build_in_temp=False`` will produce the C files locally. Working
     with complex dependencies and debugging becomes more easy. This
     can principally interfere with existing files of the same name.
-    build_in_temp can be overriden by <modulename>.pyxbld/make_setup_args()
+    build_in_temp can be overridden by <modulename>.pyxbld/make_setup_args()
     by a dict item of 'build_in_temp'
 
     ``setup_args``: dict of arguments for Distribution - see
-    distutils.core.setup() . They are extended/overriden by those of
+    distutils.core.setup() . They are extended/overridden by those of
     <modulename>.pyxbld/make_setup_args()
 
     ``reload_support``: Enables support for dynamic
......
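A usage sketch for the options documented in this hunk (``my_module`` is a hypothetical .pyx module):

```cython
import pyximport
pyximport.install(build_in_temp=False,               # keep generated C files locally
                  setup_args={"include_dirs": ["."]},
                  reload_support=True)               # allow reloading .pyx modules

import my_module                                     # hypothetical; built on first import
```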
@@ -94,7 +94,7 @@ def _cleanup_files():
 
 def get_distutils_distro(_cache=[]):
     if _cache:
         return _cache[0]
-    # late import to accomodate for setuptools override
+    # late import to accommodate for setuptools override
     from distutils.dist import Distribution
     distutils_distro = Distribution()
@@ -1765,7 +1765,7 @@ def main():
                       help="do not delete the generated C files (allows passing --no-cython on next run)")
     parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
                       action="store_false", default=True,
-                      help="do not delete the generated shared libary files (allows manual module experimentation)")
+                      help="do not delete the generated shared library files (allows manual module experimentation)")
     parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
                       action="store_false", default=True,
                       help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
......
@@ -649,7 +649,7 @@ def c_contig(object[int, ndim=1, mode='c'] buf):
 @testcase
 def c_contig_2d(object[int, ndim=2, mode='c'] buf):
     """
-    Multi-dim has seperate implementation
+    Multi-dim has separate implementation
 
     >>> A = IntMockBuffer(None, range(12), shape=(3,4))
     >>> c_contig_2d(A)
......
@@ -452,7 +452,7 @@ def c_contig(int[::1] mslice):
 
 def c_contig_2d(int[:, ::1] mslice):
     """
-    Multi-dim has seperate implementation
+    Multi-dim has separate implementation
 
     >>> A = IntMockBuffer(None, range(12), shape=(3,4))
     >>> c_contig_2d(A)
......
@@ -662,7 +662,7 @@ def c_contig(int[::1] buf):
 @testcase
 def c_contig_2d(int[:, ::1] buf):
     """
-    Multi-dim has seperate implementation
+    Multi-dim has separate implementation
 
     >>> A = IntMockBuffer(None, range(12), shape=(3,4))
     >>> c_contig_2d(A)
......
-/* A set of mutually incompatable return types. */
+/* A set of mutually incompatible return types. */
 struct short_return { char *msg; };
 struct int_return { char *msg; };
......
@@ -30,9 +30,9 @@ cpdef check(func, op, a, b):
         op_res = op(a, b)
     except OverflowError:
         assign_overflow = True
-    assert func_overflow == assign_overflow, "Inconsistant overflow: %s(%s, %s)" % (func, a, b)
+    assert func_overflow == assign_overflow, "Inconsistent overflow: %s(%s, %s)" % (func, a, b)
     if not func_overflow:
-        assert res == op_res, "Inconsistant values: %s(%s, %s) == %s != %s" % (func, a, b, res, op_res)
+        assert res == op_res, "Inconsistent values: %s(%s, %s) == %s != %s" % (func, a, b, res, op_res)
 
 medium_values = (max_value_ / 2, max_value_ / 3, min_value_ / 2, <INT>sqrt(<long double>max_value_) - <INT>1, <INT>sqrt(<long double>max_value_) + 1)
 
 def run_test(func, op):
......