Commit b3d32712 authored by Mark's avatar Mark

Merge pull request #82 from markflorisson88/copy2

Atomic acquisition counting, slice assignment (+scalars), overlapping memory, broadcasting, more memoryview object methods, optimized with gil block
parents 50bd1a47 beb90546
......@@ -748,7 +748,7 @@ def get_type_information_cname(code, dtype, maxdepth=None):
typeinfo = ('static __Pyx_TypeInfo %s = '
'{ "%s", %s, sizeof(%s), { %s }, %s, \'%s\', %s, %s };')
tup = (name, rep, structinfo_name, declcode,
', '.join([str(x) for x in arraysizes]), len(arraysizes),
', '.join([str(x) for x in arraysizes]) or '0', len(arraysizes),
typegroup, is_unsigned, flags)
typecode.putln(typeinfo % tup, safe=True)
......
......@@ -12,6 +12,7 @@ cython.declare(error=object, warning=object, warn_once=object, InternalError=obj
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object)
import sys
import operator
from Errors import error, warning, warn_once, InternalError, CompileError
......@@ -220,6 +221,9 @@ class ExprNode(Node):
constant_result = constant_value_not_set
# whether this node with a memoryview type should be broadcast
memslice_broadcast = False
try:
_get_child_attrs = operator.attrgetter('subexprs')
except AttributeError:
......@@ -643,7 +647,8 @@ class ExprNode(Node):
error(self.pos,
"Cannot convert '%s' to memoryviewslice" %
(src_type,))
elif not MemoryView.src_conforms_to_dst(src.type, dst_type):
elif not MemoryView.src_conforms_to_dst(
src.type, dst_type, broadcast=self.memslice_broadcast):
if src.type.dtype.same_as(dst_type.dtype):
msg = "Memoryview '%s' not conformable to memoryview '%s'."
tup = src.type, dst_type
......@@ -1690,7 +1695,7 @@ class NameNode(AtomicExprNode):
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_local or entry.in_closure or entry.from_closure:
elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
# Raise UnboundLocalError for objects and memoryviewslices
raise_unbound = (
(self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
......@@ -1811,7 +1816,6 @@ class NameNode(AtomicExprNode):
lhs_pos=self.pos,
rhs=rhs,
code=code,
incref_rhs=rhs.is_name,
have_gil=not self.in_nogil_context)
def generate_acquire_buffer(self, rhs, code):
......@@ -2340,7 +2344,11 @@ class IndexNode(ExprNode):
# Whether we are indexing or slicing a memoryviewslice
memslice_index = False
memslice_slice = False
is_memslice_copy = False
memslice_ellipsis_noop = False
warned_untyped_idx = False
# set by SingleAssignmentNode after analyse_types()
is_memslice_scalar_assignment = False
def __init__(self, pos, index, *args, **kw):
ExprNode.__init__(self, pos, index=index, *args, **kw)
......@@ -2555,13 +2563,6 @@ class IndexNode(ExprNode):
indices[i] = index
new_indices.append(index)
if access in ('ptr', 'generic') and i != 0 and have_slices:
self.type = error_type
return error(index.pos,
"Indexing of non-leading indirect or generic "
"dimensions not supported yet, "
"try slicing with i:i+1")
else:
self.type = error_type
return error(index.pos, "Invalid index for memoryview specified")
......@@ -2571,7 +2572,6 @@ class IndexNode(ExprNode):
# All indices with all start/stop/step for slices.
# We need to keep this around
self.indices = new_indices
self.env = env
elif self.base.type.is_buffer:
......@@ -2619,7 +2619,10 @@ class IndexNode(ExprNode):
self.type = self.base.type
self.is_memoryviewslice_access = True
if getting:
error(self.pos, "memoryviews currently support setting only.")
self.memslice_ellipsis_noop = True
else:
self.is_memslice_copy = True
self.memslice_broadcast = True
elif self.memslice_slice:
self.index = None
......@@ -2627,6 +2630,8 @@ class IndexNode(ExprNode):
self.use_managed_ref = True
self.type = PyrexTypes.MemoryViewSliceType(
self.base.type.dtype, axes)
if setting:
self.memslice_broadcast = True
else:
base_type = self.base.type
......@@ -2825,6 +2830,8 @@ class IndexNode(ExprNode):
def calculate_result_code(self):
if self.is_buffer_access:
return "(*%s)" % self.buffer_ptr_code
elif self.is_memslice_copy:
return self.base.result()
elif self.base.type is list_type:
return "PyList_GET_ITEM(%s, %s)" % (self.base.result(), self.index.result())
elif self.base.type is tuple_type:
......@@ -2849,7 +2856,7 @@ class IndexNode(ExprNode):
def generate_subexpr_evaluation_code(self, code):
self.base.generate_evaluation_code(code)
if not self.indices:
if self.indices is None:
self.index.generate_evaluation_code(code)
else:
for i in self.indices:
......@@ -2857,7 +2864,7 @@ class IndexNode(ExprNode):
def generate_subexpr_disposal_code(self, code):
self.base.generate_disposal_code(code)
if not self.indices:
if self.indices is None:
self.index.generate_disposal_code(code)
else:
for i in self.indices:
......@@ -2865,7 +2872,7 @@ class IndexNode(ExprNode):
def free_subexpr_temps(self, code):
self.base.free_temps(code)
if not self.indices:
if self.indices is None:
self.index.free_temps(code)
else:
for i in self.indices:
......@@ -2954,14 +2961,6 @@ class IndexNode(ExprNode):
self.extra_index_params(),
code.error_goto(self.pos)))
def generate_memoryviewslice_copy_code(self, rhs, code, op=""):
    """Emit C code for ``memslice_dst[...] = memslice_src`` (whole-slice copy)."""
    # Only an ellipsis index reaches this path.
    assert isinstance(self.index, EllipsisNode)
    import MemoryView
    util_code = MemoryView.CopyContentsFuncUtilCode(rhs.type, self.type)
    copy_call = "%s(&%s, &%s)" % (util_code.copy_contents_name,
                                  rhs.result(), self.base.result())
    code.putln(code.error_goto_if_neg(copy_call, self.pos))
    code.globalstate.use_utility_code(util_code)
def generate_buffer_setitem_code(self, rhs, code, op=""):
# Used from generate_assignment_code and InPlaceAssignmentNode
if code.globalstate.directives['nonecheck'] and not self.memslice_index:
......@@ -2991,9 +2990,10 @@ class IndexNode(ExprNode):
self.generate_subexpr_evaluation_code(code)
if self.is_buffer_access or self.memslice_index:
self.generate_buffer_setitem_code(rhs, code)
elif self.is_memslice_scalar_assignment:
self.generate_memoryviewslice_assign_scalar_code(rhs, code)
elif self.memslice_slice:
error(rhs.pos, "Slice assignment not supported yet")
#self.generate_memoryviewslice_setslice_code(rhs, code)
self.generate_memoryviewslice_setslice_code(rhs, code)
elif self.is_memoryviewslice_access:
self.generate_memoryviewslice_copy_code(rhs, code)
elif self.type.is_pyobject:
......@@ -3048,6 +3048,7 @@ class IndexNode(ExprNode):
return buffer_entry
def buffer_lookup_code(self, code):
"ndarray[1, 2, 3] and memslice[1, 2, 3]"
# Assign indices to temps
index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False)
for i in self.indices]
......@@ -3074,12 +3075,54 @@ class IndexNode(ExprNode):
negative_indices=negative_indices)
def put_memoryviewslice_slice_code(self, code):
"memslice[:]"
buffer_entry = self.buffer_entry()
have_gil = not self.in_nogil_context
buffer_entry.generate_buffer_slice_code(code,
self.original_indices,
if sys.version_info < (3,):
def next_(it):
return it.next()
else:
next_ = next
have_slices = False
it = iter(self.indices)
for index in self.original_indices:
is_slice = isinstance(index, SliceNode)
have_slices = have_slices or is_slice
if is_slice:
if not index.start.is_none:
index.start = next_(it)
if not index.stop.is_none:
index.stop = next_(it)
if not index.step.is_none:
index.step = next_(it)
else:
next_(it)
assert not list(it)
buffer_entry.generate_buffer_slice_code(code, self.original_indices,
self.result(),
have_gil=have_gil)
have_gil=have_gil,
have_slices=have_slices)
def generate_memoryviewslice_setslice_code(self, rhs, code):
    """Emit C code for ``memslice1[:] = memslice2`` (broadcasting copy)."""
    import MemoryView
    # Evaluate the slice target first; the copy reads its result.
    self.generate_evaluation_code(code)
    MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
def generate_memoryviewslice_copy_code(self, rhs, code):
    """Emit C code for ``memslice1[...] = memslice2`` (broadcasting copy)."""
    import MemoryView
    MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
def generate_memoryviewslice_assign_scalar_code(self, rhs, code):
    """Emit C code for ``memslice1[...] = 0.0`` or ``memslice1[:] = 0.0``."""
    import MemoryView
    # Evaluate the slice target first; the fill reads its result.
    self.generate_evaluation_code(code)
    MemoryView.assign_scalar(self, rhs, code)
def put_nonecheck(self, code):
code.globalstate.use_utility_code(raise_noneindex_error_utility_code)
......@@ -4347,7 +4390,7 @@ class AttributeNode(ExprNode):
self.type = self.obj.type
return
else:
obj_type.declare_attribute(self.attribute, env)
obj_type.declare_attribute(self.attribute, env, self.pos)
entry = obj_type.scope.lookup_here(self.attribute)
if entry and entry.is_member:
entry = None
......@@ -4524,8 +4567,7 @@ class AttributeNode(ExprNode):
elif self.type.is_memoryviewslice:
import MemoryView
MemoryView.put_assign_to_memviewslice(
select_code, rhs.result(), self.type, code,
incref_rhs=rhs.is_name)
select_code, rhs, rhs.result(), self.type, code)
if not self.type.is_memoryviewslice:
code.putln(
......
......@@ -79,7 +79,7 @@ def mangle_dtype_name(dtype):
# return "".join([access[0].upper()+packing[0] for (access, packing) in axes])
def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code,
incref_rhs=False, have_gil=False):
have_gil=False):
assert rhs.type.is_memoryviewslice
pretty_rhs = isinstance(rhs, NameNode) or rhs.result_in_temp()
......@@ -91,16 +91,16 @@ def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code,
# Allow uninitialized assignment
#code.putln(code.put_error_if_unbound(lhs_pos, rhs.entry))
put_assign_to_memviewslice(lhs_cname, rhstmp, lhs_type, code, incref_rhs,
put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code,
have_gil=have_gil)
if not pretty_rhs:
code.funcstate.release_temp(rhstmp)
def put_assign_to_memviewslice(lhs_cname, rhs_cname, memviewslicetype, code,
incref_rhs=False, have_gil=False):
def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code,
have_gil=False):
code.put_xdecref_memoryviewslice(lhs_cname, have_gil=have_gil)
if incref_rhs:
if rhs.is_name:
code.put_incref_memoryviewslice(rhs_cname, have_gil=have_gil)
code.putln("%s = %s;" % (lhs_cname, rhs_cname))
......@@ -128,8 +128,19 @@ def get_buf_flags(specs):
else:
return memview_strided_access
def insert_newaxes(memoryviewtype, n):
    """Return a copy of *memoryviewtype* with *n* extra leading strided axes."""
    new_axes = [('direct', 'strided')] * n + list(memoryviewtype.axes)
    return PyrexTypes.MemoryViewSliceType(memoryviewtype.dtype, new_axes)
def src_conforms_to_dst(src, dst):
def broadcast_types(src, dst):
    """Pad the lower-dimensional of *src*/*dst* with leading axes so both
    have the same number of dimensions, and return the (src, dst) pair."""
    diff = abs(src.ndim - dst.ndim)
    if src.ndim < dst.ndim:
        src = insert_newaxes(src, diff)
    else:
        dst = insert_newaxes(dst, diff)
    return src, dst
def src_conforms_to_dst(src, dst, broadcast=False):
'''
returns True if src conforms to dst, False otherwise.
......@@ -144,8 +155,12 @@ def src_conforms_to_dst(src, dst):
if src.dtype != dst.dtype:
return False
if len(src.axes) != len(dst.axes):
return False
if src.ndim != dst.ndim:
if broadcast:
src, dst = broadcast_types(src, dst)
else:
return False
for src_spec, dst_spec in zip(src.axes, dst.axes):
src_access, src_packing = src_spec
......@@ -259,7 +274,8 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
return bufp
def generate_buffer_slice_code(self, code, indices, dst, have_gil):
def generate_buffer_slice_code(self, code, indices, dst, have_gil,
have_slices):
"""
Slice a memoryviewslice.
......@@ -269,59 +285,54 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
Simply call __pyx_memoryview_slice_memviewslice with the right
arguments.
"""
slicefunc = "__pyx_memoryview_slice_memviewslice"
new_ndim = 0
cname = self.cname
suboffset_dim = code.funcstate.allocate_temp(PyrexTypes.c_int_type,
False)
index_code = ("%(slicefunc)s(&%(cname)s, &%(dst)s, %(have_gil)d, "
"%(dim)d, %(new_ndim)d, &%(suboffset_dim)s, "
"%(idx)s, 0, 0, 0, 0, 0, 0)")
slice_code = ("%(slicefunc)s(&%(cname)s, &%(dst)s, %(have_gil)d, "
"/* dim */ %(dim)d, "
"/* new_ndim */ %(new_ndim)d, "
"/* suboffset_dim */ &%(suboffset_dim)s, "
"/* start */ %(start)s, "
"/* stop */ %(stop)s, "
"/* step */ %(step)s, "
"/* have_start */ %(have_start)d, "
"/* have_stop */ %(have_stop)d, "
"/* have_step */ %(have_step)d, "
"/* is_slice */ 1)")
def generate_slice_call(expr):
pos = index.pos
if have_gil:
code.putln(code.error_goto_if(expr, pos))
else:
code.putln("{")
code.putln( "const char *__pyx_t_result = %s;" % expr)
code.putln( "if (unlikely(__pyx_t_result)) {")
code.put_ensure_gil()
code.putln( "PyErr_Format(PyExc_IndexError, "
"__pyx_t_result, %d);" % dim)
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln( "}")
code.putln("}")
code.putln("%s = -1;" % suboffset_dim)
code.putln("%(dst)s.data = %(cname)s.data;" % locals())
code.putln("%(dst)s.memview = %(cname)s.memview;" % locals())
src = self.cname
def load_slice_util(name, dict):
proto, impl = TempitaUtilityCode.load_as_string(
name, "MemoryView_C.c", context=dict)
return impl
all_dimensions_direct = True
for access, packing in self.type.axes:
if access != 'direct':
all_dimensions_direct = False
break
no_suboffset_dim = all_dimensions_direct and not have_slices
if not no_suboffset_dim:
suboffset_dim = code.funcstate.allocate_temp(
PyrexTypes.c_int_type, False)
code.putln("%s = -1;" % suboffset_dim)
code.putln("%(dst)s.data = %(src)s.data;" % locals())
code.putln("%(dst)s.memview = %(src)s.memview;" % locals())
code.put_incref_memoryviewslice(dst)
for dim, index in enumerate(indices):
error_goto = code.error_goto(index.pos)
if not isinstance(index, ExprNodes.SliceNode):
# normal index
idx = index.result()
generate_slice_call(index_code % locals())
access, packing = self.type.axes[dim]
if access == 'direct':
indirect = False
else:
indirect = True
generic = (access == 'full')
if new_ndim != 0:
return error(index.pos,
"All preceding dimensions must be "
"indexed and not sliced")
d = locals()
code.put(load_slice_util("SliceIndex", d))
else:
d = {}
# slice, unspecified dimension, or part of ellipsis
d = locals()
for s in "start stop step".split():
idx = getattr(index, s)
have_idx = d['have_' + s] = not idx.is_none
......@@ -330,11 +341,21 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
else:
d[s] = "0"
d.update(locals())
generate_slice_call(slice_code % d)
if (not d['have_start'] and
not d['have_stop'] and
not d['have_step']):
# full slice (:), simply copy over the extent, stride
# and suboffset. Also update suboffset_dim if needed
access, packing = self.type.axes[dim]
d['access'] = access
code.put(load_slice_util("SimpleSlice", d))
else:
code.put(load_slice_util("ToughSlice", d))
new_ndim += 1
code.funcstate.release_temp(suboffset_dim)
if not no_suboffset_dim:
code.funcstate.release_temp(suboffset_dim)
def empty_slice(pos):
......@@ -384,235 +405,201 @@ def get_memoryview_flag(access, packing):
assert (access, packing) == ('direct', 'contig'), (access, packing)
return 'contiguous'
def get_copy_func_name(to_memview):
    """Return the C name of the helper that copies a slice into a newly
    allocated C- or Fortran-contiguous buffer of *to_memview*'s type."""
    order = 'C' if to_memview.is_c_contig else 'F'
    return "__Pyx_BufferNew_%s_From_%s" % (order,
                                           to_memview.specialization_suffix())
def get_copy_contents_name(from_mvs, to_mvs):
    """Return the C name of the contents-copy helper between two memoryview
    slice types; both sides must share the same dtype."""
    assert from_mvs.dtype == to_mvs.dtype
    suffixes = (from_mvs.specialization_suffix(),
                to_mvs.specialization_suffix())
    return '__Pyx_BufferCopyContents_%s_to_%s' % suffixes
def get_is_contig_func_name(c_or_f):
return "__pyx_memviewslice_is_%s_contig" % c_or_f
def get_is_contig_func_name(c_or_f, ndim):
    """Return the C name of the contiguity predicate ('c' or 'f') for
    *ndim*-dimensional memoryview slices."""
    return "__pyx_memviewslice_is_%s_contig%d" % (c_or_f, ndim)
copy_to_template = '''
static int %(copy_to_name)s(const __Pyx_memviewslice from_mvs, __Pyx_memviewslice to_mvs) {
/* ensure from_mvs & to_mvs have the same shape & dtype */
}
'''
class CopyContentsFuncUtilCode(object):
requires = None
def __init__(self, from_memview, to_memview):
self.from_memview = from_memview
self.to_memview = to_memview
self.copy_contents_name = get_copy_contents_name(from_memview, to_memview)
def __eq__(self, other):
if not isinstance(other, CopyContentsFuncUtilCode):
return False
return other.copy_contents_name == self.copy_contents_name
def __hash__(self):
return hash(self.copy_contents_name)
def get_tree(self): pass
def put_code(self, output):
code = output['utility_code_def']
proto = output['utility_code_proto']
func_decl, func_impl = \
get_copy_contents_func(self.from_memview, self.to_memview, self.copy_contents_name)
def get_is_contig_utility(c_contig, ndim):
C = dict(context, ndim=ndim)
if c_contig:
utility = load_memview_c_utility("MemviewSliceIsCContig", C,
requires=[is_contig_utility])
else:
utility = load_memview_c_utility("MemviewSliceIsFContig", C,
requires=[is_contig_utility])
proto.put(func_decl)
code.put(func_impl)
return utility
class CopyFuncUtilCode(object):
def copy_src_to_dst_cname():
    """Return the C name of the runtime broadcasting slice-copy routine."""
    return "__pyx_memoryview_copy_contents"
requires = None
def verify_direct_dimensions(node):
    """Report a compile error unless every axis of *node*'s memoryview type
    uses direct access (indirect/'full'/'ptr' axes cannot be copied here)."""
    for access, packing in node.type.axes:
        if access != 'direct':
            # BUG FIX: was `error(self.pos, ...)` — there is no `self` in
            # this free function, so the error path raised NameError instead
            # of reporting the compile error at the node's position.
            error(node.pos, "All dimensions must be direct")
def __init__(self, from_memview, to_memview):
if from_memview.dtype != to_memview.dtype:
raise ValueError("dtypes must be the same!")
if len(from_memview.axes) != len(to_memview.axes):
raise ValueError("number of dimensions must be same")
if not (to_memview.is_c_contig or to_memview.is_f_contig):
raise ValueError("to_memview must be c or f contiguous.")
for (access, packing) in from_memview.axes:
if access != 'direct':
raise NotImplementedError("cannot handle 'full' or 'ptr' access at this time.")
def copy_broadcast_memview_src_to_dst(src, dst, code):
    """Emit a runtime call copying the contents of slice *src* into slice
    *dst*, broadcasting as needed. Indirect slices are not supported."""
    verify_direct_dimensions(src)
    verify_direct_dimensions(dst)
    copy_call = "%s(%s, %s, %d, %d, %d)" % (copy_src_to_dst_cname(),
                                            src.result(),
                                            dst.result(),
                                            src.type.ndim,
                                            dst.type.ndim,
                                            dst.type.dtype.is_pyobject)
    code.putln(code.error_goto_if_neg(copy_call, dst.pos))
def get_1d_fill_scalar_func(type, code):
    """Register the 1D strided scalar-fill utility for *type*'s dtype and
    return its C function name."""
    dtype = type.dtype
    dtype_name = mangle_dtype_name(dtype)
    fill_context = dict(dtype_name=dtype_name,
                        type_decl=dtype.declaration_code(""))
    code.globalstate.use_utility_code(
        load_memview_c_utility("FillStrided1DScalar", fill_context))
    return '__pyx_fill_slice_%s' % dtype_name
def assign_scalar(dst, scalar, code):
"""
Assign a scalar to a slice. dst must be a temp, scalar will be assigned
to a correct type and not just something assignable.
"""
verify_direct_dimensions(dst)
dtype = dst.type.dtype
type_decl = dtype.declaration_code("")
slice_decl = dst.type.declaration_code("")
code.begin_block()
code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
if dst.result_in_temp() or (dst.base.is_name and
isinstance(dst.index, ExprNodes.EllipsisNode)):
dst_temp = dst.result()
else:
code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, dst.result()))
dst_temp = "__pyx_temp_slice"
self.from_memview = from_memview
self.to_memview = to_memview
self.copy_func_name = get_copy_func_name(to_memview)
# with slice_iter(dst.type, dst_temp, dst.type.ndim, code) as p:
slice_iter_obj = slice_iter(dst.type, dst_temp, dst.type.ndim, code)
p = slice_iter_obj.start_loops()
self.requires = [CopyContentsFuncUtilCode(from_memview, to_memview)]
if dtype.is_pyobject:
code.putln("Py_DECREF(*(PyObject **) %s);" % p)
def __eq__(self, other):
if not isinstance(other, CopyFuncUtilCode):
return False
return other.copy_func_name == self.copy_func_name
code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
def __hash__(self):
return hash(self.copy_func_name)
if dtype.is_pyobject:
code.putln("Py_INCREF(__pyx_temp_scalar);")
def get_tree(self): pass
slice_iter_obj.end_loops()
code.end_block()
def put_code(self, output):
code = output['utility_code_def']
proto = output['utility_code_proto']
def slice_iter(slice_type, slice_temp, ndim, code):
    """Return the element iterator suited to *slice_type*: a flat iterator
    for contiguous slices, a nested-loop iterator otherwise."""
    contiguous = slice_type.is_c_contig or slice_type.is_f_contig
    iter_cls = ContigSliceIter if contiguous else StridedSliceIter
    return iter_cls(slice_type, slice_temp, ndim, code)
class SliceIter(object):
    """Base helper that emits C loops iterating over a slice's elements.

    Subclasses implement start_loops()/end_loops() around the loop body.
    """

    def __init__(self, slice_type, slice_temp, ndim, code):
        # Slice type, C name of the slice temp, dimensionality, and the
        # code writer the loops are emitted into.
        self.slice_type = slice_type
        self.slice_temp = slice_temp
        self.code = code
        self.ndim = ndim
class ContigSliceIter(SliceIter):
    """Iterates a C/F-contiguous slice with one flat loop over all elements."""

    def start_loops(self):
        code = self.code
        code.begin_block()
        dtype_decl = self.slice_type.dtype.declaration_code("")
        # Total element count is the product of all extents.
        extent_expr = ' * '.join("%s.shape[%d]" % (self.slice_temp, dim)
                                 for dim in range(self.ndim))
        code.putln("Py_ssize_t __pyx_temp_extent = %s;" % extent_expr)
        code.putln("Py_ssize_t __pyx_temp_idx;")
        code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % (
            dtype_decl, dtype_decl, self.slice_temp))
        code.putln("for (__pyx_temp_idx = 0; "
                   "__pyx_temp_idx < __pyx_temp_extent; "
                   "__pyx_temp_idx++) {")
        return "__pyx_temp_pointer"

    def end_loops(self):
        self.code.putln("__pyx_temp_pointer += 1;")
        self.code.putln("}")
        self.code.end_block()
class StridedSliceIter(SliceIter):
    """Iterates an arbitrarily strided slice with one nested C loop per axis."""

    def start_loops(self):
        code = self.code
        code.begin_block()
        # Per-axis extent, stride, cursor pointer and index variables.
        for dim in range(self.ndim):
            t = dim, self.slice_temp, dim
            code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t)
            code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t)
            code.putln("char *__pyx_temp_pointer_%d;" % dim)
            code.putln("Py_ssize_t __pyx_temp_idx_%d;" % dim)

        code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_temp)

        for dim in range(self.ndim):
            if dim > 0:
                code.putln("__pyx_temp_pointer_%d = __pyx_temp_pointer_%d;"
                           % (dim, dim - 1))
            code.putln("for (__pyx_temp_idx_%d = 0; "
                       "__pyx_temp_idx_%d < __pyx_temp_extent_%d; "
                       "__pyx_temp_idx_%d++) {" % (dim, dim, dim, dim))

        # Innermost cursor points at the current element.
        return "__pyx_temp_pointer_%d" % (self.ndim - 1)

    def end_loops(self):
        code = self.code
        for dim in range(self.ndim - 1, -1, -1):
            code.putln("__pyx_temp_pointer_%d += __pyx_temp_stride_%d;"
                       % (dim, dim))
            code.putln("}")
        code.end_block()
def copy_c_or_fortran_cname(memview):
if memview.is_c_contig:
c_or_f = 'c'
else:
c_or_f = 'f'
proto.put(Buffer.dedent("""\
static __Pyx_memviewslice %s(const __Pyx_memviewslice from_mvs); /* proto */
""" % self.copy_func_name))
return "__pyx_memoryview_copy_slice_%s_%s" % (
memview.specialization_suffix(), c_or_f)
copy_contents_name = get_copy_contents_name(self.from_memview, self.to_memview)
def get_copy_new_utility(pos, from_memview, to_memview):
if from_memview.dtype != to_memview.dtype:
return error(pos, "dtypes must be the same!")
if len(from_memview.axes) != len(to_memview.axes):
return error(pos, "number of dimensions must be same")
if not (to_memview.is_c_contig or to_memview.is_f_contig):
return error(pos, "to_memview must be c or f contiguous.")
if self.to_memview.is_c_contig:
mode = 'c'
contig_flag = memview_c_contiguous
elif self.to_memview.is_f_contig:
mode = 'fortran'
contig_flag = memview_f_contiguous
for (access, packing) in from_memview.axes:
if access != 'direct':
return error(
pos, "cannot handle 'full' or 'ptr' access at this time.")
C = dict(
if to_memview.is_c_contig:
mode = 'c'
contig_flag = memview_c_contiguous
elif to_memview.is_f_contig:
mode = 'fortran'
contig_flag = memview_f_contiguous
return load_memview_c_utility(
"CopyContentsUtility",
context=dict(
context,
copy_name=self.copy_func_name,
mode=mode,
sizeof_dtype="sizeof(%s)" % self.from_memview.dtype.declaration_code(''),
dtype_decl=to_memview.dtype.declaration_code(''),
contig_flag=contig_flag,
copy_contents_name=copy_contents_name
)
_, copy_code = TempitaUtilityCode.load_as_string(
"MemviewSliceCopyTemplate",
from_file="MemoryView_C.c",
context=C)
code.put(copy_code)
def get_copy_contents_func(from_mvs, to_mvs, cfunc_name):
assert from_mvs.dtype == to_mvs.dtype
assert len(from_mvs.axes) == len(to_mvs.axes)
ndim = len(from_mvs.axes)
# XXX: we only support direct access for now.
for (access, packing) in from_mvs.axes:
if access != 'direct':
raise NotImplementedError("currently only direct access is supported.")
code_decl = ("static int %(cfunc_name)s(const __Pyx_memviewslice *from_mvs,"
"__Pyx_memviewslice *to_mvs); /* proto */" % {'cfunc_name' : cfunc_name})
code_impl = '''
static int %(cfunc_name)s(const __Pyx_memviewslice *from_mvs, __Pyx_memviewslice *to_mvs) {
char *to_buf = (char *)to_mvs->data;
char *from_buf = (char *)from_mvs->data;
struct __pyx_memoryview_obj *temp_memview = 0;
char *temp_data = 0;
int ndim_idx = 0;
for(ndim_idx=0; ndim_idx<%(ndim)d; ndim_idx++) {
if(from_mvs->shape[ndim_idx] != to_mvs->shape[ndim_idx]) {
PyErr_Format(PyExc_ValueError,
"memoryview shapes not the same in dimension %%d", ndim_idx);
return -1;
}
}
''' % {'cfunc_name' : cfunc_name, 'ndim' : ndim}
# raise NotImplementedError("put in shape checking code here!!!")
INDENT = " "
dtype_decl = from_mvs.dtype.declaration_code("")
last_idx = ndim-1
if to_mvs.is_c_contig or to_mvs.is_f_contig:
if to_mvs.is_c_contig:
start, stop, step = 0, ndim, 1
elif to_mvs.is_f_contig:
start, stop, step = ndim-1, -1, -1
for i, idx in enumerate(range(start, stop, step)):
# the crazy indexing is to account for the fortran indexing.
# 'i' always goes up from zero to ndim-1.
# 'idx' is the same as 'i' for c_contig, and goes from ndim-1 to 0 for f_contig.
# this makes the loop code below identical in both cases.
code_impl += INDENT+"Py_ssize_t i%d = 0, idx%d = 0;\n" % (i,i)
code_impl += INDENT+"Py_ssize_t stride%(i)d = from_mvs->strides[%(idx)d];\n" % {'i':i, 'idx':idx}
code_impl += INDENT+"Py_ssize_t shape%(i)d = from_mvs->shape[%(idx)d];\n" % {'i':i, 'idx':idx}
code_impl += "\n"
# put down the nested for-loop.
for k in range(ndim):
code_impl += INDENT*(k+1) + "for(i%(k)d=0; i%(k)d<shape%(k)d; i%(k)d++) {\n" % {'k' : k}
if k >= 1:
code_impl += INDENT*(k+2) + "idx%(k)d = i%(k)d * stride%(k)d + idx%(km1)d;\n" % {'k' : k, 'km1' : k-1}
else:
code_impl += INDENT*(k+2) + "idx%(k)d = i%(k)d * stride%(k)d;\n" % {'k' : k}
# the inner part of the loop.
code_impl += INDENT*(ndim+1)+"memcpy(to_buf, from_buf+idx%(last_idx)d, sizeof(%(dtype_decl)s));\n" % locals()
code_impl += INDENT*(ndim+1)+"to_buf += sizeof(%(dtype_decl)s);\n" % locals()
else:
code_impl += INDENT+"/* 'f' prefix is for the 'from' memview, 't' prefix is for the 'to' memview */\n"
for i in range(ndim):
code_impl += INDENT+"char *fi%d = 0, *ti%d = 0, *end%d = 0;\n" % (i,i,i)
code_impl += INDENT+"Py_ssize_t fstride%(i)d = from_mvs->strides[%(i)d];\n" % {'i':i}
code_impl += INDENT+"Py_ssize_t fshape%(i)d = from_mvs->shape[%(i)d];\n" % {'i':i}
code_impl += INDENT+"Py_ssize_t tstride%(i)d = to_mvs->strides[%(i)d];\n" % {'i':i}
# code_impl += INDENT+"Py_ssize_t tshape%(i)d = to_mvs->shape[%(i)d];\n" % {'i':i}
code_impl += INDENT+"end0 = fshape0 * fstride0 + from_mvs->data;\n"
code_impl += INDENT+"for(fi0=from_buf, ti0=to_buf; fi0 < end0; fi0 += fstride0, ti0 += tstride0) {\n"
for i in range(1, ndim):
code_impl += INDENT*(i+1)+"end%(i)d = fshape%(i)d * fstride%(i)d + fi%(im1)d;\n" % {'i' : i, 'im1' : i-1}
code_impl += INDENT*(i+1)+"for(fi%(i)d=fi%(im1)d, ti%(i)d=ti%(im1)d; fi%(i)d < end%(i)d; fi%(i)d += fstride%(i)d, ti%(i)d += tstride%(i)d) {\n" % {'i':i, 'im1':i-1}
code_impl += INDENT*(ndim+1)+"*(%(dtype_decl)s*)(ti%(last_idx)d) = *(%(dtype_decl)s*)(fi%(last_idx)d);\n" % locals()
# for-loop closing braces
for k in range(ndim-1, -1, -1):
code_impl += INDENT*(k+1)+"}\n"
# init to_mvs->data and to_mvs shape/strides/suboffsets arrays.
code_impl += INDENT+"temp_memview = to_mvs->memview;\n"
code_impl += INDENT+"temp_data = to_mvs->data;\n"
code_impl += INDENT+"to_mvs->memview = 0; to_mvs->data = 0;\n"
code_impl += INDENT+"if(unlikely(-1 == __Pyx_init_memviewslice(temp_memview, %d, to_mvs))) {\n" % (ndim,)
code_impl += INDENT*2+"return -1;\n"
code_impl += INDENT+"}\n"
code_impl += INDENT + "return 0;\n"
code_impl += '}\n'
return code_decl, code_impl
ndim=to_memview.ndim,
func_cname=copy_c_or_fortran_cname(to_memview),
dtype_is_object=int(to_memview.dtype.is_pyobject)),
requires=[copy_contents_new_utility])
def get_axes_specs(env, axes):
'''
get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.
access is one of 'full', 'ptr' or 'direct'
packing is one of 'contig', 'strided' or 'follow'
'''
......@@ -878,7 +865,8 @@ def load_memview_c_utility(util_code_name, context=None, **kwargs):
context=context, **kwargs)
def use_cython_array_utility_code(env):
env.global_scope().context.cython_scope.lookup('array_cwrapper').used = True
scope = env.global_scope().context.cython_scope
scope.lookup('array_cwrapper').used = True
env.use_utility_code(cython_array_utility_code)
context = {
......@@ -892,11 +880,15 @@ memviewslice_declare_code = load_memview_c_utility(
proto_block='utility_code_proto_before_types',
context=context)
atomic_utility = load_memview_c_utility("Atomics", context,
proto_block='utility_code_proto_before_types')
memviewslice_init_code = load_memview_c_utility(
"MemviewSliceInit",
context=dict(context, BUF_MAX_NDIMS=Options.buffer_max_dims),
requires=[memviewslice_declare_code,
Buffer.acquire_utility_code],
Buffer.acquire_utility_code,
atomic_utility],
)
memviewslice_index_helpers = load_memview_c_utility("MemviewSliceIndex")
......@@ -905,10 +897,12 @@ typeinfo_to_format_code = load_memview_cy_utility(
"BufferFormatFromTypeInfo", requires=[Buffer._typeinfo_to_format_code])
is_contig_utility = load_memview_c_utility("MemviewSliceIsContig", context)
is_c_contig_utility = load_memview_c_utility("MemviewSliceIsCContig", context,
requires=[is_contig_utility])
is_f_contig_utility = load_memview_c_utility("MemviewSliceIsFContig", context,
requires=[is_contig_utility])
overlapping_utility = load_memview_c_utility("OverlappingSlices", context)
copy_contents_new_utility = load_memview_c_utility(
"MemviewSliceCopyTemplate",
context,
requires=[], # require cython_array_utility_code
)
view_utility_code = load_memview_cy_utility(
"View.MemoryView",
......@@ -916,7 +910,10 @@ view_utility_code = load_memview_cy_utility(
requires=[Buffer.GetAndReleaseBufferUtilityCode(),
Buffer.buffer_struct_declare_code,
Buffer.empty_bufstruct_utility,
memviewslice_init_code],
memviewslice_init_code,
is_contig_utility,
overlapping_utility,
copy_contents_new_utility],
)
cython_array_utility_code = load_memview_cy_utility(
......@@ -924,6 +921,8 @@ cython_array_utility_code = load_memview_cy_utility(
context=context,
requires=[view_utility_code])
copy_contents_new_utility.requires.append(cython_array_utility_code)
# memview_fromslice_utility_code = load_memview_cy_utility(
# "MemviewFromSlice",
# context=context,
......
......@@ -1460,7 +1460,8 @@ class FuncDefNode(StatNode, BlockNode):
if self.return_type.is_pyobject:
init = " = NULL"
elif self.return_type.is_memoryviewslice:
init = "= {0, 0}"
import MemoryView
init = ' = ' + MemoryView.memslice_entry_init
code.putln(
"%s%s;" %
......@@ -1479,20 +1480,47 @@ class FuncDefNode(StatNode, BlockNode):
# ----- GIL acquisition
acquire_gil = self.acquire_gil
# See if we need to acquire the GIL for variable declarations and
acquire_gil_for_var_decls_only = (lenv.nogil and
lenv.has_with_gil_block)
# See if we need to acquire the GIL for variable declarations, or for
# refnanny only
# Profiling or closures are not currently possible for cdef nogil
# functions, but check them anyway
have_object_args = (self.needs_closure or self.needs_outer_scope or
profile)
for arg in lenv.arg_entries:
if arg.type.is_pyobject:
have_object_args = True
break
acquire_gil_for_var_decls_only = (
lenv.nogil and lenv.has_with_gil_block and
(have_object_args or lenv.buffer_entries))
use_refnanny = not lenv.nogil or acquire_gil_for_var_decls_only
acquire_gil_for_refnanny_only = (
lenv.nogil and lenv.has_with_gil_block and not
acquire_gil_for_var_decls_only)
use_refnanny = not lenv.nogil or lenv.has_with_gil_block
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_ensure_gil()
# ----- set up refnanny
if use_refnanny:
if acquire_gil_for_refnanny_only:
code.declare_gilstate()
code.putln("#if CYTHON_REFNANNY")
code.put_ensure_gil(declare_gilstate=False)
code.putln("#endif /* CYTHON_REFNANNY */")
tempvardecl_code.put_declare_refcount_context()
code.put_setup_refcount_context(self.entry.name)
if acquire_gil_for_refnanny_only:
code.putln("#if CYTHON_REFNANNY")
code.put_release_ensured_gil()
code.putln("#endif /* CYTHON_REFNANNY */")
# ----- Automatic lead-ins for certain special functions
if is_getbuffer_slot:
self.getbuffer_init(code)
......@@ -1510,7 +1538,7 @@ class FuncDefNode(StatNode, BlockNode):
if use_refnanny:
code.put_finish_refcount_context()
if acquire_gil_for_var_decls_only:
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
# FIXME: what if the error return value is a Python value?
......@@ -1551,11 +1579,12 @@ class FuncDefNode(StatNode, BlockNode):
not entry.in_closure):
code.put_var_incref(entry)
# Note: defaults are always increffed. For def functions, we
# Note: defaults are always incref-ed. For def functions, we
# acquire arguments from object conversion, so we have
# new references. If we are a cdef function, we need to
# incref our arguments
if is_cdef and entry.type.is_memoryviewslice:
elif (is_cdef and entry.type.is_memoryviewslice and
len(entry.cf_assignments) > 1):
code.put_incref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
for entry in lenv.var_entries:
......@@ -1566,10 +1595,6 @@ class FuncDefNode(StatNode, BlockNode):
for entry in lenv.var_entries + lenv.arg_entries:
if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
Buffer.put_init_vars(entry, code)
# ----- Initialise local memoryviewslices
for entry in lenv.var_entries:
if entry.visibility == "private" and not entry.used:
continue
# ----- Check and convert arguments
self.generate_argument_type_tests(code)
......@@ -1631,13 +1656,13 @@ class FuncDefNode(StatNode, BlockNode):
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
if lenv.nogil:
if lenv.nogil and not lenv.has_with_gil_block:
code.putln("{")
code.put_ensure_gil()
code.put_add_traceback(self.entry.qualified_name)
if lenv.nogil:
if lenv.nogil and not lenv.has_with_gil_block:
code.put_release_ensured_gil()
code.putln("}")
else:
......@@ -1715,7 +1740,10 @@ class FuncDefNode(StatNode, BlockNode):
if ((acquire_gil or len(entry.cf_assignments) > 1) and
not entry.in_closure):
code.put_var_decref(entry)
if entry.type.is_memoryviewslice:
elif (entry.type.is_memoryviewslice and
(not is_cdef or len(entry.cf_assignments) > 1)):
# decref slices of def functions and acquired slices from cdef
# functions, but not borrowed slices from cdef functions.
code.put_xdecref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
if self.needs_closure:
......@@ -1747,7 +1775,8 @@ class FuncDefNode(StatNode, BlockNode):
# GIL-holding function
code.put_finish_refcount_context()
if acquire_gil or acquire_gil_for_var_decls_only:
if (acquire_gil or acquire_gil_for_var_decls_only or
acquire_gil_for_refnanny_only):
code.put_release_ensured_gil()
if not self.return_type.is_void:
......@@ -3152,7 +3181,7 @@ class DefNodeWrapper(FuncDefNode):
if self.signature.has_dummy_arg:
args.append(Naming.self_cname)
for arg in self.args:
if arg.hdr_type:
if arg.hdr_type and not (arg.type.is_memoryviewslice or arg.type.is_struct):
args.append(arg.type.cast_code(arg.entry.cname))
else:
args.append(arg.entry.cname)
......@@ -4724,10 +4753,28 @@ class SingleAssignmentNode(AssignmentNode):
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp = 0):
import ExprNodes
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
if self.lhs.memslice_broadcast or self.rhs.memslice_broadcast:
self.lhs.memslice_broadcast = True
self.rhs.memslice_broadcast = True
is_index_node = isinstance(self.lhs, ExprNodes.IndexNode)
if (is_index_node and not self.rhs.type.is_memoryviewslice and
(self.lhs.memslice_slice or self.lhs.is_memslice_copy) and
(self.lhs.type.dtype.assignable_from(self.rhs.type) or
self.rhs.type.is_pyobject)):
# scalar slice assignment
self.lhs.is_memslice_scalar_assignment = True
dtype = self.lhs.type.dtype
else:
dtype = self.lhs.type
self.rhs = self.rhs.coerce_to(dtype, env)
if use_temp:
self.rhs = self.rhs.coerce_to_temp(env)
......@@ -5168,7 +5215,6 @@ class ReturnStatNode(StatNode):
lhs_pos=self.value.pos,
rhs=self.value,
code=code,
incref_rhs=self.value.is_name,
have_gil=self.in_nogil_context)
else:
self.value.make_owned_reference(code)
......
......@@ -67,9 +67,9 @@ old_style_globals = False
cimport_from_pyx = False
# max # of dims for buffers -- set to same value as max # of dims for numpy
# arrays.
buffer_max_dims = 32
# max # of dims for buffers -- set lower than number of dimensions in numpy, as
# slices are passed by value and involve a lot of copying
buffer_max_dims = 8
# Declare compiler directives
directive_defaults = {
......
......@@ -1778,6 +1778,9 @@ class AnalyseExpressionsTransform(CythonTransform):
if node.is_fused_index and node.type is not PyrexTypes.error_type:
node = node.base
elif node.memslice_ellipsis_noop:
# memoryviewslice[...] expression, drop the IndexNode
node = node.base
return node
......
......@@ -510,7 +510,7 @@ class MemoryViewSliceType(PyrexType):
return True
def declare_attribute(self, attribute, env):
def declare_attribute(self, attribute, env, pos):
import MemoryView, Options
scope = self.scope
......@@ -518,24 +518,24 @@ class MemoryViewSliceType(PyrexType):
if attribute == 'shape':
scope.declare_var('shape',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
None,
Options.buffer_max_dims),
pos,
cname='shape',
is_cdef=1)
elif attribute == 'strides':
scope.declare_var('strides',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
None,
Options.buffer_max_dims),
pos,
cname='strides',
is_cdef=1)
elif attribute == 'suboffsets':
scope.declare_var('suboffsets',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
None,
Options.buffer_max_dims),
pos,
cname='suboffsets',
is_cdef=1)
......@@ -544,40 +544,32 @@ class MemoryViewSliceType(PyrexType):
to_axes_c = [('direct', 'contig')]
to_axes_f = [('direct', 'contig')]
if ndim-1:
if ndim - 1:
to_axes_c = [('direct', 'follow')]*(ndim-1) + to_axes_c
to_axes_f = to_axes_f + [('direct', 'follow')]*(ndim-1)
to_memview_c = MemoryViewSliceType(self.dtype, to_axes_c)
to_memview_f = MemoryViewSliceType(self.dtype, to_axes_f)
cython_name_c, cython_name_f = "copy", "copy_fortran"
copy_name_c, copy_name_f = (
MemoryView.get_copy_func_name(to_memview_c),
MemoryView.get_copy_func_name(to_memview_f))
for (to_memview, cython_name, copy_name) in ((to_memview_c, cython_name_c, copy_name_c),
(to_memview_f, cython_name_f, copy_name_f)):
for to_memview, cython_name in [(to_memview_c, "copy"),
(to_memview_f, "copy_fortran")]:
entry = scope.declare_cfunction(cython_name,
CFuncType(self,
[CFuncTypeArg("memviewslice", self, None)]),
pos = None,
defining = 1,
cname = copy_name)
CFuncType(self, [CFuncTypeArg("memviewslice", self, None)]),
pos=pos,
defining=1,
cname=MemoryView.copy_c_or_fortran_cname(to_memview))
entry.utility_code_definition = \
MemoryView.CopyFuncUtilCode(self, to_memview)
#entry.utility_code_definition = \
env.use_utility_code(MemoryView.get_copy_new_utility(pos, self, to_memview))
MemoryView.use_cython_array_utility_code(env)
elif attribute in ("is_c_contig", "is_f_contig"):
# is_c_contig and is_f_contig functions
for (c_or_f, cython_name) in (('c', 'is_c_contig'), ('fortran', 'is_f_contig')):
for (c_or_f, cython_name) in (('c', 'is_c_contig'), ('f', 'is_f_contig')):
is_contig_name = \
MemoryView.get_is_contig_func_name(c_or_f)
MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
cfunctype = CFuncType(
return_type=c_int_type,
......@@ -587,14 +579,12 @@ class MemoryViewSliceType(PyrexType):
entry = scope.declare_cfunction(cython_name,
cfunctype,
pos = None,
defining = 1,
cname = is_contig_name)
pos=pos,
defining=1,
cname=is_contig_name)
if attribute == 'is_c_contig':
entry.utility_code_definition = MemoryView.is_c_contig_utility
else:
entry.utility_code_definition = MemoryView.is_f_contig_utility
entry.utility_code_definition = MemoryView.get_is_contig_utility(
attribute == 'is_c_contig', self.ndim)
return True
......@@ -604,10 +594,6 @@ class MemoryViewSliceType(PyrexType):
def can_coerce_to_pyobject(self, env):
return True
#def global_init_code(self, entry, code):
# code.putln("%s.data = NULL;" % entry.cname)
# code.putln("%s.memview = NULL;" % entry.cname)
def check_for_null_code(self, cname):
return cname + '.memview'
......@@ -657,8 +643,9 @@ class MemoryViewSliceType(PyrexType):
to_py_func = "(PyObject *(*)(char *)) " + to_py_func
from_py_func = "(int (*)(char *, PyObject *)) " + from_py_func
tup = (obj.result(), self.ndim, to_py_func, from_py_func)
return "__pyx_memoryview_fromslice(&%s, %s, %s, %s);" % tup
tup = (obj.result(), self.ndim, to_py_func, from_py_func,
self.dtype.is_pyobject)
return "__pyx_memoryview_fromslice(&%s, %s, %s, %s, %d);" % tup
def dtype_object_conversion_funcs(self, env):
import MemoryView, Code
......
......@@ -13,12 +13,20 @@ cdef extern from "Python.h":
PyBUF_FORMAT
PyBUF_WRITABLE
ctypedef struct PyObject
PyObject *Py_None
void Py_INCREF(PyObject *)
void Py_INCREF(object)
void Py_DECREF(object)
cdef extern from *:
object __pyx_memoryview_new(object obj, int flags)
object __pyx_memoryview_new(object obj, int flags, bint dtype_is_object)
Py_ssize_t fill_contig_strides_array "__pyx_fill_contig_strides_array" (
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
int ndim, char order) nogil
cdef void refcount_objects_in_slice "__pyx_memoryview_refcount_objects_in_slice" (
char *data, Py_ssize_t *shape, Py_ssize_t *strides, int ndim, bint inc)
@cname("__pyx_array")
cdef class array:
......@@ -36,10 +44,15 @@ cdef class array:
void (*callback_free_data)(void *data)
# cdef object _memview
cdef bint free_data
cdef bint dtype_is_object
def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format,
def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
mode=u"c", bint allocate_buffer=True):
cdef int idx
cdef Py_ssize_t i
cdef PyObject **p
self.ndim = len(shape)
self.itemsize = itemsize
......@@ -63,33 +76,26 @@ cdef class array:
free(self._strides)
raise MemoryError("unable to allocate shape or strides.")
cdef int idx
# cdef Py_ssize_t dim, stride
idx = 0
for dim in shape:
for idx, dim in enumerate(shape):
if dim <= 0:
raise ValueError("Invalid shape.")
raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
self._shape[idx] = dim
idx += 1
stride = itemsize
if mode == "fortran":
idx = 0
for dim in shape:
self._strides[idx] = stride
stride = stride * dim
idx += 1
elif mode == "c":
idx = self.ndim-1
for dim in shape[::-1]:
self._strides[idx] = stride
stride = stride * dim
idx -= 1
else:
if mode not in ("fortran", "c"):
raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
self.len = stride
cdef char order
if mode == 'fortran':
order = 'F'
else:
order = 'C'
self.len = fill_contig_strides_array(self._shape, self._strides,
itemsize, self.ndim, order)
decode = getattr(mode, 'decode', None)
if decode:
......@@ -97,11 +103,18 @@ cdef class array:
self.mode = mode
self.free_data = allocate_buffer
self.dtype_is_object = format == b'O'
if allocate_buffer:
self.data = <char *>malloc(self.len)
if not self.data:
raise MemoryError("unable to allocate array data.")
if self.dtype_is_object:
p = <PyObject **> self.data
for i in range(self.len / itemsize):
p[i] = Py_None
Py_INCREF(Py_None)
def __getbuffer__(self, Py_buffer *info, int flags):
cdef int bufmode = -1
if self.mode == b"c":
......@@ -130,20 +143,13 @@ cdef class array:
if self.callback_free_data != NULL:
self.callback_free_data(self.data)
elif self.free_data:
if self.dtype_is_object:
refcount_objects_in_slice(self.data, self._shape,
self._strides, self.ndim, False)
free(self.data)
self.data = NULL
if self._strides:
free(self._strides)
self._strides = NULL
if self._shape:
free(self._shape)
self._shape = NULL
self.format = NULL
self.itemsize = 0
free(self._strides)
free(self._shape)
property memview:
@cname('__pyx_cython_array_get_memview')
......@@ -155,7 +161,7 @@ cdef class array:
#return self._memview
flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
return __pyx_memoryview_new(self, flags)
return __pyx_memoryview_new(self, flags, self.dtype_is_object)
def __getattr__(self, attr):
......@@ -169,16 +175,20 @@ cdef class array:
@cname("__pyx_array_new")
cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *mode, char *buf):
cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,
char *mode, char *buf):
cdef array result
if buf == NULL:
result = array(shape, itemsize, format, mode.decode('ASCII'))
else:
result = array(shape, itemsize, format, mode.decode('ASCII'), allocate_buffer=False)
result = array(shape, itemsize, format, mode.decode('ASCII'),
allocate_buffer=False)
result.data = buf
return result
#################### View.MemoryView ####################
import cython
......@@ -203,11 +213,9 @@ cdef extern from *:
int __Pyx_GetBuffer(object, Py_buffer *, int) except -1
void __Pyx_ReleaseBuffer(Py_buffer *)
void Py_INCREF(object)
void Py_DECREF(object)
void Py_XINCREF(object)
ctypedef struct PyObject
void Py_INCREF(PyObject *)
void Py_DECREF(PyObject *)
cdef struct __pyx_memoryview "__pyx_memoryview_obj":
Py_buffer view
......@@ -236,6 +244,27 @@ cdef extern from *:
PyBUF_WRITABLE
PyBUF_STRIDES
PyBUF_INDIRECT
PyBUF_RECORDS
cdef extern from *:
ctypedef int __pyx_atomic_int
{{memviewslice_name}} slice_copy_contig "__pyx_memoryview_copy_new_contig"(
__Pyx_memviewslice *from_mvs,
char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
bint dtype_is_object) nogil except *
bint slice_is_contig "__pyx_memviewslice_is_contig" (
{{memviewslice_name}} *mvs, char order, int ndim) nogil
bint slices_overlap "__pyx_slices_overlap" ({{memviewslice_name}} *slice1,
{{memviewslice_name}} *slice2,
int ndim, size_t itemsize) nogil
cdef extern from "stdlib.h":
void *malloc(size_t) nogil
void free(void *) nogil
void *memcpy(void *dest, void *src, size_t n) nogil
@cname('__pyx_MemviewEnum')
cdef class Enum(object):
......@@ -278,23 +307,29 @@ cdef class memoryview(object):
cdef object _size
cdef object _array_interface
cdef PyThread_type_lock lock
cdef int acquisition_count
cdef __pyx_atomic_int acquisition_count
cdef Py_buffer view
cdef int flags
cdef bint dtype_is_object
def __cinit__(memoryview self, object obj, int flags):
def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
self.obj = obj
self.flags = flags
if type(self) is memoryview or obj is not None:
__Pyx_GetBuffer(obj, &self.view, flags)
if <PyObject *> self.view.obj == NULL:
(<__pyx_buffer *> &self.view).obj = Py_None
Py_INCREF(None)
Py_INCREF(Py_None)
self.lock = PyThread_allocate_lock()
if self.lock == NULL:
raise MemoryError
if flags & PyBUF_FORMAT:
self.dtype_is_object = self.view.format == b'O'
else:
self.dtype_is_object = dtype_is_object
def __dealloc__(memoryview self):
if self.obj is not None:
__Pyx_ReleaseBuffer(&self.view)
......@@ -328,9 +363,68 @@ cdef class memoryview(object):
@cname('__pyx_memoryview_setitem')
def __setitem__(memoryview self, object index, object value):
have_slices, index = _unellipsify(index, self.view.ndim)
if have_slices:
raise NotImplementedError("Slice assignment not supported yet")
obj = self.is_slice(value)
if obj:
self.setitem_slice_assignment(self[index], obj)
else:
self.setitem_slice_assign_scalar(self[index], value)
else:
self.setitem_indexed(index, value)
cdef is_slice(self, obj):
if not isinstance(obj, memoryview):
try:
obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
self.dtype_is_object)
except TypeError:
return None
return obj
    cdef setitem_slice_assignment(self, dst, src):
        # Copy the contents of memoryview src into memoryview dst.
        # Shape verification and broadcasting are handled inside
        # memoryview_copy_contents; the slice structs are obtained by
        # pointer (filled in-place for plain memoryviews).
        cdef {{memviewslice_name}} dst_slice
        cdef {{memviewslice_name}} src_slice

        memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
                                 get_slice_from_memview(dst, &dst_slice)[0],
                                 src.ndim, dst.ndim, self.dtype_is_object)
    cdef setitem_slice_assign_scalar(self, dst, value):
        # Assign one scalar `value` to every element of slice `dst`.
        # The scalar is first converted into a single raw item: on the
        # stack buffer `array` when the itemsize fits, otherwise in a
        # malloc-ed temporary (freed on every exit path below).
        cdef int array[128]
        cdef void *tmp = NULL
        cdef void *item

        cdef {{memviewslice_name}} tmp_slice, *dst_slice
        dst_slice = get_slice_from_memview(dst, &tmp_slice)

        if self.view.itemsize > sizeof(array):
            tmp = malloc(self.view.itemsize)
            if tmp == NULL:
                raise MemoryError
            item = tmp
        else:
            item = <void *> array

        if self.dtype_is_object:
            # Object dtype: store the borrowed pointer directly; the
            # refcounting is handled by slice_assign_scalar.
            (<PyObject **> item)[0] = <PyObject *> value
        else:
            try:
                self.assign_item_from_object(<char *> item, value)
            except:
                # free(NULL) is a no-op when the stack buffer was used.
                free(tmp)
                raise

        # It would be easy to support indirect dimensions, but it's easier
        # to disallow :)
        if self.view.suboffsets != NULL:
            assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
        slice_assign_scalar(dst_slice, self.view.ndim, self.view.itemsize,
                            item, self.dtype_is_object)
        free(tmp)
    cdef setitem_indexed(self, index, value):
        # Single-element assignment: resolve `index` to a raw item
        # pointer, then convert and store the Python value there.
        cdef char *itemp = self.get_item_pointer(index)
        self.assign_item_from_object(itemp, value)
......@@ -457,30 +551,6 @@ cdef class memoryview(object):
return self._size
# The __array_interface__ is broken as it does not properly convert PEP 3118 format
# strings into NumPy typestrs. NumPy 1.5 supports the new buffer interface, though.
'''
property __array_interface__:
@cname('__pyx_numpy_array_interface')
def __get__(self):
"Interface for NumPy to obtain a ndarray from this object"
# Note: we always request writable buffers, so we set readonly to
# False in the data tuple
if self._array_interface is None:
for suboffset in self.suboffsets:
if suboffset >= 0:
raise ValueError("Cannot convert indirect buffer to numpy object")
self._array_interface = dict(
data = (PyLong_FromVoidPtr(self.view.buf), False),
shape = self.shape,
strides = self.strides,
typestr = "%s%d" % (self.view.format, self.view.itemsize),
version = 3)
return self._array_interface
'''
def __len__(self):
if self.view.ndim >= 1:
return self.view.shape[0]
......@@ -488,15 +558,51 @@ cdef class memoryview(object):
return 0
def __repr__(self):
return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, id(self))
return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
id(self))
def __str__(self):
return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
# Support the same attributes as memoryview slices
    def is_c_contig(self):
        # True if the underlying buffer is C (row-major) contiguous.
        cdef {{memviewslice_name}} *mslice, tmp
        mslice = get_slice_from_memview(self, &tmp)
        return slice_is_contig(mslice, 'C', self.view.ndim)
    def is_f_contig(self):
        # True if the underlying buffer is Fortran (column-major) contiguous.
        cdef {{memviewslice_name}} *mslice, tmp
        mslice = get_slice_from_memview(self, &tmp)
        return slice_is_contig(mslice, 'F', self.view.ndim)
    def copy(self):
        # Return a new memoryview object over a C-contiguous copy of
        # this buffer's data.
        cdef {{memviewslice_name}} mslice
        cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS  # drop any F-contig claim

        slice_copy(self, &mslice)
        mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
                                   self.view.itemsize,
                                   flags|PyBUF_C_CONTIGUOUS,
                                   self.dtype_is_object)

        return memoryview_copy_from_slice(self, &mslice)
    def copy_fortran(self):
        # Return a new memoryview object over a Fortran-contiguous copy
        # of this buffer's data.
        cdef {{memviewslice_name}} src, dst
        cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS  # drop any C-contig claim

        slice_copy(self, &src)
        dst = slice_copy_contig(&src, "fortran", self.view.ndim,
                                self.view.itemsize,
                                flags|PyBUF_F_CONTIGUOUS,
                                self.dtype_is_object)

        return memoryview_copy_from_slice(self, &dst)
@cname('__pyx_memoryview_new')
cdef memoryview_cwrapper(object o, int flags):
return memoryview(o, flags)
cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object):
return memoryview(o, flags, dtype_is_object)
cdef tuple _unellipsify(object index, int ndim):
"""
......@@ -532,6 +638,12 @@ cdef tuple _unellipsify(object index, int ndim):
return have_slices or nslices, tuple(result)
cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
    """
    Raise ValueError when any of the first ndim suboffsets marks an
    indirect (pointer-chased) dimension, i.e. is >= 0.
    """
    cdef int dim

    dim = 0
    while dim < ndim:
        if suboffsets[dim] >= 0:
            raise ValueError("Indirect dimensions not supported")
        dim += 1
#
### Slicing a memoryview
#
......@@ -575,7 +687,7 @@ cdef memoryview memview_slice(memoryview memview, object indices):
for dim, index in enumerate(indices):
if PyIndex_Check(index):
slice_memviewslice(p_src, p_dst, True, dim, new_ndim, p_suboffset_dim,
slice_memviewslice(p_src, p_dst, dim, new_ndim, p_suboffset_dim,
index, 0, 0, 0, 0, 0, False)
else:
start = index.start or 0
......@@ -586,7 +698,7 @@ cdef memoryview memview_slice(memoryview memview, object indices):
have_stop = index.stop is not None
have_step = index.step is not None
slice_memviewslice(p_src, p_dst, True, dim, new_ndim, p_suboffset_dim,
slice_memviewslice(p_src, p_dst, dim, new_ndim, p_suboffset_dim,
start, stop, step, have_start, have_stop, have_step,
True)
new_ndim += 1
......@@ -594,9 +706,11 @@ cdef memoryview memview_slice(memoryview memview, object indices):
if isinstance(memview, _memoryviewslice):
return memoryview_fromslice(&dst, new_ndim,
memviewsliceobj.to_object_func,
memviewsliceobj.to_dtype_func)
memviewsliceobj.to_dtype_func,
memview.dtype_is_object)
else:
return memoryview_fromslice(&dst, new_ndim, NULL, NULL)
return memoryview_fromslice(&dst, new_ndim, NULL, NULL,
memview.dtype_is_object)
#
......@@ -620,9 +734,8 @@ cdef extern from "pystate.h":
PyObject *PyErr_Format(PyObject *exc, char *msg, ...) nogil
@cname('__pyx_memoryview_slice_memviewslice')
cdef char *slice_memviewslice({{memviewslice_name}} *src,
cdef int slice_memviewslice({{memviewslice_name}} *src,
{{memviewslice_name}} *dst,
bint have_gil,
int dim,
int new_ndim,
int *suboffset_dim,
......@@ -632,11 +745,10 @@ cdef char *slice_memviewslice({{memviewslice_name}} *src,
int have_start,
int have_stop,
int have_step,
bint is_slice) nogil except *:
bint is_slice) nogil except -1:
"""
Create a new slice dst given slice src.
have_gil - if true, the GIL must be held and exceptions may be raised
dim - the current src dimension (indexing will make dimensions
disappear)
new_dim - the new dst dimension
......@@ -649,19 +761,6 @@ cdef char *slice_memviewslice({{memviewslice_name}} *src,
Py_ssize_t new_shape
bint negative_step
# Somehow these pointers are NULL when set as globals... this needs investigation
char *ERR_OOB = "Index out of bounds (axis %d)"
char *ERR_STEP = "Step must not be zero (axis %d)"
char *ERR_INDIRECT_GIL = ("Cannot make indirect dimension %d disappear "
"through indexing, consider slicing with %d:%d")
char *ERR_INDIRECT_NOGIL = ("Cannot make indirect dimension %d disappear "
"through indexing")
PyObject *exc = <PyObject *> IndexError
if have_gil:
# Assert the GIL
PyThreadState_Get()
shape = src.shape[dim]
stride = src.strides[dim]
suboffset = src.suboffsets[dim]
......@@ -671,19 +770,13 @@ cdef char *slice_memviewslice({{memviewslice_name}} *src,
if start < 0:
start += shape
if not 0 <= start < shape:
if have_gil:
PyErr_Format(exc, ERR_OOB, dim)
return ERR_OOB
_err_dim(IndexError, "Index out of bounds (axis %d)", dim)
else:
# index is a slice
negative_step = have_step != 0 and step < 0
if have_step and step == 0:
if have_gil:
# ValueError might be more appropriate, but this will make it consistent
# with nogil slicing
PyErr_SetString(exc, ERR_STEP)
return ERR_STEP
_err_dim(ValueError, "Step may not be zero (axis %d)", dim)
# check our bounds and set defaults
if have_start:
......@@ -741,13 +834,15 @@ cdef char *slice_memviewslice({{memviewslice_name}} *src,
if suboffset >= 0:
if not is_slice:
if have_gil:
PyErr_Format(exc, ERR_INDIRECT_GIL, dim, start, start + 1)
return ERR_INDIRECT_NOGIL
if new_ndim == 0:
dst.data = (<char **> dst.data)[0] + suboffset
else:
_err_dim(IndexError, "All dimensions preceding dimension %d "
"must be indexed and not sliced", dim)
else:
suboffset_dim[0] = new_ndim
return NULL
return 0
#
### Index a memoryview
......@@ -800,8 +895,7 @@ cdef int transpose_memslice({{memviewslice_name}} *memslice) nogil except 0:
shape[i], shape[j] = shape[j], shape[i]
if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
with gil:
raise ValueError("Cannot transpose memoryview with indirect dimensions")
_err(ValueError, "Cannot transpose memoryview with indirect dimensions")
return 1
......@@ -844,14 +938,16 @@ cdef class _memoryviewslice(memoryview):
cdef memoryview_fromslice({{memviewslice_name}} *memviewslice,
int ndim,
object (*to_object_func)(char *),
int (*to_dtype_func)(char *, object) except 0):
int (*to_dtype_func)(char *, object) except 0,
bint dtype_is_object):
cdef _memoryviewslice result
cdef int i
assert 0 < ndim <= memviewslice.memview.view.ndim, (ndim, memviewslice.memview.view.ndim)
# assert 0 < ndim <= memviewslice.memview.view.ndim, (
# ndim, memviewslice.memview.view.ndim)
result = _memoryviewslice(None, 0)
result = _memoryviewslice(None, 0, dtype_is_object)
result.from_slice = memviewslice[0]
__PYX_INC_MEMVIEW(memviewslice, 1)
......@@ -862,7 +958,9 @@ cdef memoryview_fromslice({{memviewslice_name}} *memviewslice,
result.view.buf = <void *> memviewslice.data
result.view.ndim = ndim
(<__pyx_buffer *> &result.view).obj = Py_None
Py_INCREF(None)
Py_INCREF(Py_None)
result.flags = PyBUF_RECORDS
result.view.shape = <Py_ssize_t *> result.from_slice.shape
result.view.strides = <Py_ssize_t *> result.from_slice.strides
......@@ -877,26 +975,52 @@ cdef memoryview_fromslice({{memviewslice_name}} *memviewslice,
return result
@cname('__pyx_memoryview_get_slice_from_memoryview')
cdef {{memviewslice_name}} *get_slice_from_memview(memoryview memview,
                                                   {{memviewslice_name}} *mslice):
    # Return a pointer to a slice struct for `memview`: for
    # _memoryviewslice objects the embedded from_slice is returned
    # directly; otherwise *mslice is filled from the Py_buffer via
    # slice_copy and mslice itself is returned.
    cdef _memoryviewslice obj
    if isinstance(memview, _memoryviewslice):
        obj = memview
        return &obj.from_slice
    else:
        slice_copy(memview, mslice)
        return mslice
@cname('__pyx_memoryview_slice_copy')
cdef void slice_copy(memoryview memview, {{memviewslice_name}} *dst):
cdef int dim
cdef Py_ssize_t *shape, *strides, *suboffsets
shape = memview.view.shape
strides = memview.view.strides
suboffsets = memview.view.suboffsets
dst.memview = <__pyx_memoryview *> memview
dst.data = <char *> memview.view.buf
for dim in range(memview.view.ndim):
dst.shape[dim] = memview.view.shape[dim]
dst.strides[dim] = memview.view.strides[dim]
dst.suboffsets[dim] = memview.view.suboffsets[dim]
dst.shape[dim] = shape[dim]
dst.strides[dim] = strides[dim]
if suboffsets == NULL:
dst.suboffsets[dim] = -1
else:
dst.suboffsets[dim] = suboffsets[dim]
@cname('__pyx_memoryview_copy')
@cname('__pyx_memoryview_copy_object')
cdef memoryview_copy(memoryview memview):
"Create a new memoryview object"
cdef {{memviewslice_name}} memviewslice
slice_copy(memview, &memviewslice)
return memoryview_copy_from_slice(memview, &memviewslice)
@cname('__pyx_memoryview_copy_object_from_slice')
cdef memoryview_copy_from_slice(memoryview memview, {{memviewslice_name}} *memviewslice):
"""
Create a new memoryview object from a given memoryview object and slice.
"""
cdef object (*to_object_func)(char *)
cdef int (*to_dtype_func)(char *, object) except 0
slice_copy(memview, &memviewslice)
if isinstance(memview, _memoryviewslice):
to_object_func = (<_memoryviewslice> memview).to_object_func
to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
......@@ -904,8 +1028,329 @@ cdef memoryview_copy(memoryview memview):
to_object_func = NULL
to_dtype_func = NULL
return memoryview_fromslice(&memviewslice, memview.view.ndim,
to_object_func, to_dtype_func)
return memoryview_fromslice(memviewslice, memview.view.ndim,
to_object_func, to_dtype_func,
memview.dtype_is_object)
#
### Copy the contents of a memoryview slices
#
cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
    # Absolute value for Py_ssize_t, usable without the GIL
    # (the builtin abs() would require Python object handling).
    return -arg if arg < 0 else arg
@cname('__pyx_get_best_slice_order')
cdef char get_best_order({{memviewslice_name}} *mslice, int ndim) nogil:
    """
    Figure out the best memory access order for a given slice.
    Returns 'C' when the innermost stride is the smaller one in
    magnitude, 'F' otherwise.
    """
    cdef int dim
    cdef Py_ssize_t c_stride = 0
    cdef Py_ssize_t f_stride = 0

    # Innermost (last) dimension with extent > 1 determines the C-order stride.
    dim = ndim - 1
    while dim >= 0:
        if mslice.shape[dim] > 1:
            c_stride = mslice.strides[dim]
            break
        dim -= 1

    # Outermost (first) dimension with extent > 1 determines the F-order stride.
    dim = 0
    while dim < ndim:
        if mslice.shape[dim] > 1:
            f_stride = mslice.strides[dim]
            break
        dim += 1

    if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
        return 'C'
    else:
        return 'F'
@cython.cdivision(True)
cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides,
                                   char *dst_data, Py_ssize_t *dst_strides,
                                   Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
                                   int ndim, size_t itemsize) nogil:
    # Recursively copy an ndim-dimensional strided buffer into another.
    # Note: src_extent is 1 if we're broadcasting
    # dst_extent always >= src_extent as we don't do reductions
    cdef Py_ssize_t i
    cdef Py_ssize_t src_extent = src_shape[0]
    cdef Py_ssize_t dst_extent = dst_shape[0]
    cdef Py_ssize_t src_stride = src_strides[0]
    cdef Py_ssize_t dst_stride = dst_strides[0]

    if ndim == 1:
        # Base case: when both strides equal the itemsize the run is
        # contiguous and one memcpy suffices; otherwise copy item by item.
        if (src_stride > 0 and dst_stride > 0 and
            <size_t> src_stride == itemsize == <size_t> dst_stride):
            memcpy(dst_data, src_data, itemsize * dst_extent)
        else:
            for i in range(dst_extent):
                memcpy(dst_data, src_data, itemsize)
                src_data += src_stride
                dst_data += dst_stride
    else:
        # Recurse into the next dimension for each index of this one.
        # A zero src_stride (broadcasting) re-reads the same source row.
        for i in range(dst_extent):
            _copy_strided_to_strided(src_data, src_strides + 1,
                                     dst_data, dst_strides + 1,
                                     src_shape + 1, dst_shape + 1,
                                     ndim - 1, itemsize)
            src_data += src_stride
            dst_data += dst_stride
cdef void copy_strided_to_strided({{memviewslice_name}} *src,
                                  {{memviewslice_name}} *dst,
                                  int ndim, size_t itemsize) nogil:
    # Convenience entry point: unpack the slice structs and delegate to
    # the recursive per-dimension copy.
    _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides,
                             src.shape, dst.shape, ndim, itemsize)
@cname('__pyx_memoryview_slice_get_size')
cdef Py_ssize_t slice_get_size({{memviewslice_name}} *src, int ndim) nogil:
    "Return the size of the memory occupied by the slice in number of bytes"
    cdef int dim
    cdef Py_ssize_t nbytes

    # itemsize multiplied by every extent of the slice
    nbytes = src.memview.view.itemsize
    for dim in range(ndim):
        nbytes = nbytes * src.shape[dim]

    return nbytes
@cname('__pyx_fill_contig_strides_array')
cdef Py_ssize_t fill_contig_strides_array(
            Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
            int ndim, char order) nogil:
    """
    Fill the strides array for a slice with C or F contiguous strides.
    This is like PyBuffer_FillContiguousStrides, but compatible with py < 2.6
    """
    cdef int dim

    if order != 'F':
        # C order: strides grow from the last dimension towards the first.
        for dim in range(ndim - 1, -1, -1):
            strides[dim] = stride
            stride = stride * shape[dim]
    else:
        # Fortran order: strides grow from the first dimension onwards.
        for dim in range(ndim):
            strides[dim] = stride
            stride = stride * shape[dim]

    # The accumulated stride: the total size in bytes when the initial
    # stride argument was the itemsize.
    return stride
@cname('__pyx_memoryview_copy_data_to_temp')
cdef void *copy_data_to_temp({{memviewslice_name}} *src,
                             {{memviewslice_name}} *tmpslice,
                             char order,
                             int ndim) nogil except NULL:
    """
    Copy a direct slice to temporary contiguous memory. The caller should free
    the result when done.
    """
    cdef int i
    cdef void *result

    cdef size_t itemsize = src.memview.view.itemsize
    cdef size_t size = slice_get_size(src, ndim)

    result = malloc(size)
    if not result:
        # _err acquires the GIL and raises; except NULL propagates it.
        _err(MemoryError, NULL)

    # tmpslice[0] = src
    tmpslice.data = <char *> result
    tmpslice.memview = src.memview
    for i in range(ndim):
        tmpslice.shape[i] = src.shape[i]
        tmpslice.suboffsets[i] = -1  # the copy is always direct

    fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
                              ndim, order)

    # We need to broadcast strides again
    for i in range(ndim):
        if tmpslice.shape[i] == 1:
            tmpslice.strides[i] = 0

    if slice_is_contig(src, order, ndim):
        # Source already contiguous in the requested order: one memcpy.
        memcpy(result, src.data, size)
    else:
        copy_strided_to_strided(src, tmpslice, ndim, itemsize)

    return result
# Use 'with gil' functions and avoid 'with gil' blocks, as the code within the blocks
# has temporaries that need the GIL to clean up
@cname('__pyx_memoryview_err_extents')
cdef int _err_extents(int i, Py_ssize_t extent1,
                             Py_ssize_t extent2) except -1 with gil:
    # Raise for a shape mismatch in dimension i. 'with gil' because raising
    # an exception needs the GIL while the callers run in nogil context.
    raise ValueError("got differing extents in dimension %d (got %d and %d)" %
                                                        (i, extent1, extent2))
@cname('__pyx_memoryview_err_dim')
cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
    # Raise 'error' with an ASCII C-string message template that contains a
    # single %d placeholder for the dimension index.
    raise error(msg.decode('ascii') % dim)
@cname('__pyx_memoryview_err')
cdef int _err(object error, char *msg) except -1 with gil:
    # Raise 'error'; msg may be NULL to raise the exception type without
    # arguments (see copy_data_to_temp's MemoryError path).
    if msg != NULL:
        raise error(msg.decode('ascii'))
    else:
        raise error
@cname('__pyx_memoryview_copy_contents')
cdef int memoryview_copy_contents({{memviewslice_name}} src,
                                  {{memviewslice_name}} dst,
                                  int src_ndim, int dst_ndim,
                                  bint dtype_is_object) nogil except -1:
    """
    Copy memory from slice src to slice dst.
    Check for overlapping memory and verify the shapes.

    Broadcasts extent-1 source dimensions over dst; raises (via the
    GIL-acquiring _err helpers) on shape mismatch or indirect dimensions.
    """
    cdef void *tmpdata = NULL
    cdef size_t itemsize = src.memview.view.itemsize
    cdef int i
    cdef char order = get_best_order(&src, src_ndim)
    cdef bint broadcasting = False
    cdef bint direct_copy = False
    cdef {{memviewslice_name}} tmp

    # Prepend leading extent-1 dimensions to whichever slice has fewer dims.
    if src_ndim < dst_ndim:
        broadcast_leading(&src, src_ndim, dst_ndim)
    elif dst_ndim < src_ndim:
        broadcast_leading(&dst, dst_ndim, src_ndim)

    cdef int ndim = max(src_ndim, dst_ndim)

    for i in range(ndim):
        if src.shape[i] != dst.shape[i]:
            if src.shape[i] == 1:
                # Broadcast an extent-1 source dimension by repeating it with
                # a zero stride.
                broadcasting = True
                src.strides[i] = 0
            else:
                _err_extents(i, dst.shape[i], src.shape[i])

        if src.suboffsets[i] >= 0:
            _err_dim(ValueError, "Dimension %d is not direct", i)

    if slices_overlap(&src, &dst, ndim, itemsize):
        # slices overlap, copy to temp, copy temp to dst
        if not slice_is_contig(&src, order, ndim):
            order = get_best_order(&dst, ndim)

        tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
        src = tmp

    if not broadcasting:
        # See if both slices have equal contiguity, in that case perform a
        # direct copy. This only works when we are not broadcasting.
        if slice_is_contig(&src, 'C', ndim):
            direct_copy = slice_is_contig(&dst, 'C', ndim)
        elif slice_is_contig(&src, 'F', ndim):
            direct_copy = slice_is_contig(&dst, 'F', ndim)

        if direct_copy:
            # Contiguous slices with same order
            refcount_copying(&dst, dtype_is_object, ndim, False)
            memcpy(dst.data, src.data, slice_get_size(&src, ndim))
            refcount_copying(&dst, dtype_is_object, ndim, True)
            # Bug fix: this early return previously leaked the temporary
            # buffer allocated for overlapping slices.
            free(tmpdata)
            return 0

    if order == 'F' == get_best_order(&dst, ndim):
        # see if both slices have Fortran order, transpose them to match our
        # C-style indexing order
        transpose_memslice(&src)
        transpose_memslice(&dst)

    refcount_copying(&dst, dtype_is_object, ndim, False)
    copy_strided_to_strided(&src, &dst, ndim, itemsize)
    refcount_copying(&dst, dtype_is_object, ndim, True)

    free(tmpdata)
    return 0
@cname('__pyx_memoryview_broadcast_leading')
cdef void broadcast_leading({{memviewslice_name}} *slice,
                            int ndim,
                            int ndim_other) nogil:
    # Prepend (ndim_other - ndim) leading dimensions of extent 1 so this
    # slice matches the higher-dimensional operand (numpy-style leading
    # broadcast).
    cdef int i
    cdef int offset = ndim_other - ndim

    # Shift existing dimensions towards the end; iterate backwards so the
    # in-place shift does not overwrite entries still to be moved.
    for i in range(ndim - 1, -1, -1):
        slice.shape[i + offset] = slice.shape[i]
        slice.strides[i + offset] = slice.strides[i]
        slice.suboffsets[i + offset] = slice.suboffsets[i]

    # New leading dimensions: extent 1 and direct (-1 suboffset). The stride
    # of an extent-1 dimension is never used to advance, so strides[0] is a
    # harmless filler value.
    for i in range(offset):
        slice.shape[i] = 1
        slice.strides[i] = slice.strides[0]
        slice.suboffsets[i] = -1
#
### Take care of refcounting the objects in slices. Do this seperately from any copying,
### to minimize acquiring the GIL
#
@cname('__pyx_memoryview_refcount_copying')
cdef void refcount_copying({{memviewslice_name}} *dst, bint dtype_is_object,
                           int ndim, bint inc) nogil:
    # incref or decref the objects in the destination slice if the dtype is
    # object
    if dtype_is_object:
        # The helper acquires the GIL itself, so it is only taken when a
        # refcount pass is actually needed.
        refcount_objects_in_slice_with_gil(dst.data, dst.shape,
                                           dst.strides, ndim, inc)
@cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape,
                                             Py_ssize_t *strides, int ndim,
                                             bint inc) with gil:
    # GIL-acquiring wrapper: the Py_INCREF/Py_DECREF calls in the worker
    # below require the GIL.
    refcount_objects_in_slice(data, shape, strides, ndim, inc)
@cname('__pyx_memoryview_refcount_objects_in_slice')
cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape,
                                    Py_ssize_t *strides, int ndim, bint inc):
    # Recursively walk the slice; in the innermost dimension each item is a
    # PyObject pointer that is increfed (inc=True) or decrefed (inc=False).
    cdef Py_ssize_t i

    for i in range(shape[0]):
        if ndim == 1:
            if inc:
                Py_INCREF((<PyObject **> data)[0])
            else:
                Py_DECREF((<PyObject **> data)[0])
        else:
            refcount_objects_in_slice(data, shape + 1, strides + 1,
                                      ndim - 1, inc)

        # Advance to the next item/subarray in this dimension.
        data += strides[0]
#
### Scalar to slice assignment
#
@cname('__pyx_memoryview_slice_assign_scalar')
cdef void slice_assign_scalar({{memviewslice_name}} *dst, int ndim,
                              size_t itemsize, void *item,
                              bint dtype_is_object) nogil:
    # Assign one scalar item ('itemsize' bytes at 'item') to every element of
    # dst. For object dtypes: decref the old contents first, then incref the
    # freshly written pointers.
    refcount_copying(dst, dtype_is_object, ndim, False)
    _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
                         itemsize, item)
    refcount_copying(dst, dtype_is_object, ndim, True)
@cname('__pyx_memoryview__slice_assign_scalar')
cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,
                               Py_ssize_t *strides, int ndim,
                               size_t itemsize, void *item) nogil:
    # Recursive worker for slice_assign_scalar: memcpy the item into every
    # innermost element, following this dimension's stride.
    cdef Py_ssize_t i
    cdef Py_ssize_t stride = strides[0]
    cdef Py_ssize_t extent = shape[0]

    if ndim == 1:
        for i in range(extent):
            memcpy(data, item, itemsize)
            data += stride
    else:
        for i in range(extent):
            _slice_assign_scalar(data, shape + 1, strides + 1,
                                 ndim - 1, itemsize, item)
            data += stride
############### BufferFormatFromTypeInfo ###############
cdef extern from *:
......
......@@ -3,6 +3,8 @@
/* memoryview slice struct */
struct {{memview_struct_name}};
typedef struct {
struct {{memview_struct_name}} *memview;
char *data;
......@@ -11,6 +13,114 @@ typedef struct {
Py_ssize_t suboffsets[{{max_dims}}];
} {{memviewslice_name}};
/////////// Atomics.proto /////////////
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif

/* Default counter type; overridden per-platform below. */
#define __pyx_atomic_int_type int

/* todo: Portland pgcc, maybe OS X's OSAtomicIncrement32,
   libatomic + autotools-like distutils support? Such a pain... */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \
        (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2))
    /* gcc >= 4.1.2.
       Fix: the version macro is __GNUC_PATCHLEVEL__; the previous
       misspelling __GNUC_PATHLEVEL is never defined, which silently
       disabled GNU atomics for all gcc 4.1.x compilers. */
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)

    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER)
    /* msvc.
       Fix: test defined(_MSC_VER); the bare token MSC_VER is never defined
       by MSVC, so this branch was previously unreachable.
       Also #undef the default type before redefining it to avoid a macro
       redefinition diagnostic. */
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)

    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using MSVC atomics"
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    /* Intel compiler branch deliberately disabled with '&& 0'. */
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)

    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    /* No known atomic primitives: fall back to lock-based counting. */
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0

    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif

typedef volatile __pyx_atomic_int_type __pyx_atomic_int;

#if CYTHON_ATOMICS
    /* The atomic ops need an aligned address; these wrappers fall back to
       the lock-based implementation for unaligned counters. */
    static CYTHON_INLINE __pyx_atomic_int_type
    __pyx_atomic_incr_maybealigned(__pyx_atomic_int *value, PyThread_type_lock lock);
    static CYTHON_INLINE __pyx_atomic_int_type
    __pyx_atomic_decr_maybealigned(__pyx_atomic_int *value, PyThread_type_lock lock);

    #define __pyx_add_acquisition_count(memview) \
        __pyx_atomic_incr_maybealigned(&memview->acquisition_count, memview->lock)
    #define __pyx_sub_acquisition_count(memview) \
        __pyx_atomic_decr_maybealigned(&memview->acquisition_count, memview->lock)
#else
    #define __pyx_add_acquisition_count(memview) \
        __pyx_add_acquisition_count_locked(&memview->acquisition_count, memview->lock)
    #define __pyx_sub_acquisition_count(memview) \
        __pyx_sub_acquisition_count_locked(&memview->acquisition_count, memview->lock)
#endif
////////// Atomics //////////
#if CYTHON_ATOMICS

    /* True if 'pointer' is not aligned for atomic access.
       NOTE(review): this tests against sizeof(pointer) (the pointer's own
       size), not sizeof(*pointer); on LP64 that checks 8-byte alignment for
       a 4-byte counter -- stricter than necessary but safe, since unaligned
       values just take the locked path. Confirm this is intentional. */
    #define __pyx_check_unaligned(type, pointer) \
                    (((type) pointer) & (sizeof(pointer) - 1))

    static CYTHON_INLINE int
    __pyx_atomic_unaligned(__pyx_atomic_int *p)
    {
        /* uintptr_t is optional in C99, try other stuff */

        if (sizeof(unsigned long) >= sizeof(p))
            return __pyx_check_unaligned(unsigned long, p);
        else if (sizeof(size_t) >= sizeof(p))
            return __pyx_check_unaligned(size_t, p);

    #if __STDC_VERSION__ >= 199901L
        if (sizeof(unsigned long long) >= sizeof(p))
            return __pyx_check_unaligned(unsigned long long, p);
    #endif

        /* No integer type wide enough to inspect the pointer: report
           "unaligned" so the caller falls back to the locked counters. */
        return 1;
    }

    static CYTHON_INLINE __pyx_atomic_int_type
    __pyx_atomic_incr_maybealigned(__pyx_atomic_int *value, PyThread_type_lock lock)
    {
        /* Use the real atomic op only for aligned addresses; otherwise
           serialize with the per-memoryview lock. */
        if (unlikely(__pyx_atomic_unaligned(value)))
            return __pyx_add_acquisition_count_locked(value, lock);
        else
            return __pyx_atomic_incr_aligned(value, lock);
    }

    static CYTHON_INLINE __pyx_atomic_int_type
    __pyx_atomic_decr_maybealigned(__pyx_atomic_int *value, PyThread_type_lock lock)
    {
        if (unlikely(__pyx_atomic_unaligned(value)))
            return __pyx_sub_acquisition_count_locked(value, lock);
        else
            return __pyx_atomic_decr_aligned(value, lock);
    }
#endif
/////////////// ObjectToMemviewSlice.proto ///////////////
static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *);
......@@ -37,7 +147,10 @@ static int __Pyx_init_memviewslice(
int ndim,
__Pyx_memviewslice *memviewslice);
static int CYTHON_INLINE __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock);
static int CYTHON_INLINE __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock);
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW({{memviewslice_name}} *, int, int);
......@@ -52,7 +165,7 @@ static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj) {
{{memviewslice_name}} result = {{memslice_init}};
struct __pyx_memoryview_obj *memview = \
(struct __pyx_memoryview_obj *) __pyx_memoryview_new(obj, {{buf_flag}});
(struct __pyx_memoryview_obj *) __pyx_memoryview_new(obj, {{buf_flag}}, 0);
__Pyx_BufFmt_StackElem stack[{{struct_nesting_depth}}];
int axes_specs[] = { {{axes_specs}} };
int retcode;
......@@ -112,8 +225,8 @@ static int __Pyx_ValidateAndInit_memviewslice(
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" PY_FORMAT_SIZE_T "d byte%s) "
"does not match size of '%s' (%" PY_FORMAT_SIZE_T "d byte%s)",
"Item size of buffer (%" PY_FORMAT_SIZE_T "u byte%s) "
"does not match size of '%s' (%" PY_FORMAT_SIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
......@@ -146,7 +259,10 @@ static int __Pyx_ValidateAndInit_memviewslice(
}
if (spec & (__Pyx_MEMVIEW_STRIDED | __Pyx_MEMVIEW_FOLLOW)) {
if (buf->strides[i] < buf->itemsize) {
Py_ssize_t stride = buf->strides[i];
if (stride < 0)
stride = -stride;
if (stride < buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous in the same dimension.");
goto fail;
......@@ -210,11 +326,11 @@ no_fail:
return retval;
}
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice) {
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
{{memviewslice_name}} *memviewslice)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
......@@ -242,7 +358,7 @@ static int __Pyx_init_memviewslice(
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
memview->acquisition_count++;
__pyx_add_acquisition_count(memview);
retval = 0;
goto no_fail;
......@@ -275,8 +391,32 @@ static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) {
va_end(vargs);
}
static CYTHON_INLINE void __Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice,
int have_gil, int lineno) {
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    /* Lock-based fallback for incrementing the acquisition count when no
       atomic primitives are available (or the address is unaligned).
       Returns the value *before* the increment (post-increment semantics,
       matching __sync_fetch_and_add). */
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return result;
}

static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    /* Counterpart decrement; returns the value *before* the decrement. */
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil, int lineno)
{
int first_time;
struct {{memview_struct_name}} *memview = memslice->memview;
if (!memview)
......@@ -286,9 +426,7 @@ static CYTHON_INLINE void __Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice,
__pyx_fatalerror("Acquisition count is %d (line %d)",
memview->acquisition_count, lineno);
PyThread_acquire_lock(memview->lock, 1);
first_time = (memview->acquisition_count++ == 0);
PyThread_release_lock(memview->lock);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (first_time) {
if (have_gil) {
......@@ -313,10 +451,7 @@ static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *memslice,
__pyx_fatalerror("Acquisition count is %d (line %d)",
memview->acquisition_count, lineno);
PyThread_acquire_lock(memview->lock, 1);
last_time = (memview->acquisition_count-- == 1);
PyThread_release_lock(memview->lock);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (last_time) {
if (have_gil) {
......@@ -331,32 +466,49 @@ static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *memslice,
}
}
////////// MemviewSliceCopyTemplate //////////
static __Pyx_memviewslice {{copy_name}}(const __Pyx_memviewslice from_mvs) {
////////// MemviewSliceCopyTemplate.proto //////////
static {{memviewslice_name}}
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
////////// MemviewSliceCopyTemplate //////////
static {{memviewslice_name}}
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = {{memslice_init}};
struct __pyx_memoryview_obj *from_memview = from_mvs.memview;
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = 0;
PyObject *temp_int = 0;
struct __pyx_array_obj *array_obj = 0;
struct __pyx_memoryview_obj *memview_obj = 0;
char *mode = (char *) "{{mode}}";
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("{{copy_name}}");
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig");
shape_tuple = PyTuple_New((Py_ssize_t)(buf->ndim));
if(unlikely(!shape_tuple)) {
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i=0; i<buf->ndim; i++) {
temp_int = PyInt_FromLong(buf->shape[i]);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromLong(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
......@@ -364,36 +516,32 @@ static __Pyx_memviewslice {{copy_name}}(const __Pyx_memviewslice from_mvs) {
}
}
array_obj = __pyx_array_new(shape_tuple, {{sizeof_dtype}}, buf->format, mode, NULL);
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, {{contig_flag}});
if (unlikely(!memview_obj)) {
(PyObject *) array_obj, contig_flag,
dtype_is_object);
if (unlikely(!memview_obj))
goto fail;
}
/* initialize new_mvs */
if (unlikely(-1 == __Pyx_init_memviewslice(memview_obj, buf->ndim, &new_mvs))) {
PyErr_SetString(PyExc_RuntimeError,
"Could not initialize new memoryviewslice object.");
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs) < 0))
goto fail;
}
if (unlikely(-1 == {{copy_contents_name}}(&from_mvs, &new_mvs))) {
/* PyErr_SetString(PyExc_RuntimeError,
"Could not copy contents of memoryview slice."); */
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
}
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview); new_mvs.memview = 0;
new_mvs.data = 0;
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple); shape_tuple = 0;
__Pyx_GOTREF(temp_int);
......@@ -401,27 +549,85 @@ no_fail:
__Pyx_XDECREF(array_obj); array_obj = 0;
__Pyx_RefNannyFinishContext();
return new_mvs;
}
////////// CopyContentsUtility.proto /////////
#define {{func_cname}}(slice) \
__pyx_memoryview_copy_new_contig(&slice, "{{mode}}", {{ndim}}, \
sizeof({{dtype_decl}}), {{contig_flag}}, \
{{dtype_is_object}})
////////// OverlappingSlices.proto //////////
static int __pyx_slices_overlap({{memviewslice_name}} *slice1,
{{memviewslice_name}} *slice2,
int ndim, size_t itemsize);
////////// OverlappingSlices //////////
/* Based on numpy's core/src/multiarray/array_assign.c */
/* Gets a half-open range [start, end) which contains the array data */
static void
__pyx_get_array_memory_extents({{memviewslice_name}} *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    /* Compute the half-open byte range [*out_start, *out_end) spanned by the
       slice's data: positive strides extend the end, negative strides pull
       the start down. */
    char *start, *end;
    int i;

    start = end = slice->data;

    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];

        if (extent == 0) {
            /* An empty dimension means the slice occupies no memory. */
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }

    /* Return a half-open range */
    *out_start = start;
    *out_end = end + itemsize;
}
/* Returns 1 if the arrays have overlapping data, 0 otherwise */
static int
__pyx_slices_overlap({{memviewslice_name}} *slice1,
                     {{memviewslice_name}} *slice2,
                     int ndim, size_t itemsize)
{
    void *start1, *end1, *start2, *end2;

    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);

    /* Two half-open ranges intersect iff each one starts before the other
       ends. */
    return (start1 < end2) && (start2 < end1);
}
////////// MemviewSliceIsCContig.proto //////////
//@requires MemviewSliceIsContig
static int __pyx_memviewslice_is_c_contig(const {{memviewslice_name}});
#define __pyx_memviewslice_is_c_contig{{ndim}}(slice) \
__pyx_memviewslice_is_contig(&slice, 'C', {{ndim}})
////////// MemviewSliceIsFContig.proto //////////
//@requires MemviewSliceIsContig
static int __pyx_memviewslice_is_fortran_contig(const {{memviewslice_name}});
#define __pyx_memviewslice_is_f_contig{{ndim}}(slice) \
__pyx_memviewslice_is_contig(&slice, 'F', {{ndim}})
////////// MemviewSliceIsContig.proto //////////
static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} *mvs,
char order);
char order, int ndim);
////////// MemviewSliceIsContig //////////
static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} *mvs,
char order) {
static int
__pyx_memviewslice_is_contig(const {{memviewslice_name}} *mvs,
char order, int ndim)
{
int i, index, step, start;
int ndim = mvs->memview->view.ndim;
Py_ssize_t itemsize = mvs->memview->view.itemsize;
if (order == 'F') {
......@@ -443,16 +649,6 @@ static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} *mvs,
return 1;
}
////////// MemviewSliceIsCContig //////////
static int __pyx_memviewslice_is_c_contig(const {{memviewslice_name}} mvs) {
return __pyx_memviewslice_is_contig(&mvs, 'C');
}
////////// MemviewSliceIsFContig //////////
static int __pyx_memviewslice_is_fortran_contig(const {{memviewslice_name}} mvs) {
return __pyx_memviewslice_is_contig(&mvs, 'F');
}
/////////////// MemviewSliceIndex ///////////////
static CYTHON_INLINE char *
......@@ -511,3 +707,124 @@ int {{set_function}}(const char *itemp, PyObject *obj) {
*(PyObject **) itemp = obj;
return 1;
}
/////////// ToughSlice //////////
/* Dimension is indexed with 'start:stop:step' */
if (unlikely(__pyx_memoryview_slice_memviewslice(
&{{src}},
&{{dst}},
{{dim}},
{{new_ndim}},
&{{suboffset_dim}},
{{start}},
{{stop}},
{{step}},
{{int(have_start)}},
{{int(have_stop)}},
{{int(have_step)}},
1) < 0))
{
{{error_goto}}
}
////////// SimpleSlice //////////
/* Dimension is indexed with ':' only */
{{dst}}.shape[{{new_ndim}}] = {{src}}.shape[{{dim}}];
{{dst}}.strides[{{new_ndim}}] = {{src}}.strides[{{dim}}];
{{if access == 'direct'}}
{{dst}}.suboffsets[{{new_ndim}}] = -1;
{{else}}
{{dst}}.suboffsets[{{new_ndim}}] = {{src}}.suboffsets[{{dim}}];
if ({{src}}.suboffsets[{{dim}}] >= 0)
{{suboffset_dim}} = {{new_ndim}};
{{endif}}
////////// SliceIndex //////////
/* Dimension is indexed with an integer, we could use the ToughSlice */
/* approach, but this is faster */
{
Py_ssize_t __pyx_tmp_idx = {{idx}};
Py_ssize_t __pyx_tmp_shape = {{src}}.shape[{{dim}}];
Py_ssize_t __pyx_tmp_stride = {{src}}.strides[{{dim}}];
if (__pyx_tmp_idx < 0)
__pyx_tmp_idx += __pyx_tmp_shape;
if (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape) {
{{if not have_gil}}
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
{{endif}}
PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis {{dim}})");
{{if not have_gil}}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{{endif}}
{{error_goto}}
}
{{if all_dimensions_direct}}
{{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride;
{{else}}
if ({{suboffset_dim}} < 0) {
{{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride;
/* This dimension is the first dimension, or is preceded by */
/* direct or indirect dimensions that are indexed away. */
/* Hence suboffset_dim must be less than zero, and we can have */
/* our data pointer refer to another block by dereferencing. */
/* slice.data -> B -> C becomes slice.data -> C */
{{if indirect}}
{
Py_ssize_t __pyx_tmp_suboffset = {{src}}.suboffsets[{{dim}}];
{{if generic}}
if (__pyx_tmp_suboffset >= 0)
{{endif}}
{{dst}}.data = *((char **) {{dst}}.data) + __pyx_tmp_suboffset;
}
{{endif}}
} else {
{{dst}}.suboffsets[{{suboffset_dim}}] += __pyx_tmp_idx * __pyx_tmp_stride;
/* Note: dimension can not be indirect, the compiler will have */
/* issued an error */
}
{{endif}}
}
////////// FillStrided1DScalar.proto //////////
static void
__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
size_t itemsize, void *itemp);
////////// FillStrided1DScalar //////////
/* Fill a slice with a scalar value. The dimension is direct and strided or contiguous */
static void
__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
                                size_t itemsize, void *itemp)
{
    Py_ssize_t i;
    {{type_decl}} item = *(({{type_decl}} *) itemp);
    {{type_decl}} *endp;

    /* 'stride' arrives in bytes; convert to element units for the typed
       pointer arithmetic below.
       NOTE(review): assumes stride is a positive multiple of
       sizeof({{type_decl}}) -- a negative stride would make endp < p and
       skip the loop entirely; confirm callers normalize the direction. */
    stride /= sizeof({{type_decl}});
    endp = p + stride * extent;

    while (p < endp) {
        *p = item;
        p += stride;
    }
}
......@@ -71,6 +71,8 @@
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
#endif
......
......@@ -652,6 +652,11 @@ def test_indirect_slicing(arg):
(5, 3, 2)
0 0 -1
58
56
58
58
58
58
released A
>>> test_indirect_slicing(IntMockBuffer("A", shape_9_14_21_list, shape=(9, 14, 21)))
......@@ -659,6 +664,11 @@ def test_indirect_slicing(arg):
(5, 14, 3)
0 16 -1
2412
2410
2412
2412
2412
2412
released A
"""
cdef int[::view.indirect, ::view.indirect, :] _a = arg
......@@ -669,6 +679,11 @@ def test_indirect_slicing(arg):
print_int_offsets(*b.suboffsets)
print b[4, 2, 1]
print b[..., 0][4, 2]
print b[..., 1][4, 2]
print b[..., 1][4][2]
print b[4][2][1]
print b[4, 2][1]
def test_direct_slicing(arg):
"""
......
......@@ -74,6 +74,37 @@ def test_copy_to():
print to_data[i],
print
@testcase
def test_overlapping_copy():
    """
    >>> test_overlapping_copy()
    """
    # Assign a reversed view of an array to itself: the copy machinery must
    # detect the full overlap and route the data through a temporary buffer.
    cdef int i, array[10]
    for i in range(10):
        array[i] = i

    cdef int[:] slice = array
    slice[...] = slice[::-1]

    for i in range(10):
        assert slice[i] == 10 - 1 - i
@testcase
def test_partly_overlapping():
    """
    >>> test_partly_overlapping()
    """
    # dst (array[0:5]) and src (array[4:9]) share only element 4; even this
    # partial overlap must be detected and handled via a temporary copy.
    cdef int i, array[10]
    for i in range(10):
        array[i] = i

    cdef int[:] slice = array
    cdef int[:] slice2 = slice[:5]
    slice2[...] = slice[4:9]

    for i in range(5):
        assert slice2[i] == i + 4
@testcase
@cython.nonecheck(True)
def test_nonecheck1():
......@@ -139,11 +170,11 @@ def test_copy_mismatch():
u'''
>>> test_copy_mismatch()
Traceback (most recent call last):
...
ValueError: memoryview shapes not the same in dimension 0
...
ValueError: got differing extents in dimension 0 (got 2 and 3)
'''
cdef int[:,:,::1] mv1 = array((2,2,3), sizeof(int), 'i')
cdef int[:,:,::1] mv2 = array((1,2,3), sizeof(int), 'i')
cdef int[:,:,::1] mv2 = array((3,2,3), sizeof(int), 'i')
mv1[...] = mv2
......
......@@ -1240,6 +1240,13 @@ def test_indirect_slicing(arg):
0 0 -1
58
56
58
index away indirect
58
58
index away generic
58
58
released A
>>> test_indirect_slicing(IntMockBuffer("A", shape_9_14_21_list, shape=(9, 14, 21)))
......@@ -1248,12 +1255,27 @@ def test_indirect_slicing(arg):
0 16 -1
2412
2410
2412
index away indirect
2412
2412
index away generic
2412
2412
released A
"""
cdef int[::view.indirect, ::view.indirect, :] a = arg
cdef int[::view.indirect, ::view.indirect, :] b = a[-5:, ..., -5:100:2]
cdef int[::view.generic , :: view.generic, :] generic_b = a[-5:, ..., -5:100:2]
cdef int[::view.indirect, ::view.indirect] c = b[..., 0]
# try indexing away leading indirect dimensions
cdef int[::view.indirect, :] d = b[4]
cdef int[:] e = b[4, 2]
cdef int[::view.generic, :] generic_d = generic_b[4]
cdef int[:] generic_e = generic_b[4, 2]
print b.shape[0], b.shape[1], b.shape[2]
print b.suboffsets[0] // sizeof(int *),
print b.suboffsets[1] // sizeof(int),
......@@ -1261,6 +1283,90 @@ def test_indirect_slicing(arg):
print b[4, 2, 1]
print c[4, 2]
# test adding offset from last dimension to suboffset
print b[..., 1][4, 2]
print "index away indirect"
print d[2, 1]
print e[1]
print "index away generic"
print generic_d[2, 1]
print generic_e[1]
cdef class TestIndexSlicingDirectIndirectDims(object):
"Test a int[:, ::view.indirect, :] slice"
cdef Py_ssize_t[3] shape, strides, suboffsets
cdef int c_array[5]
cdef int *myarray[5][5]
cdef bytes format
def __init__(self):
cdef int i
self.c_array[3] = 20
self.myarray[1][2] = self.c_array
for i in range(3):
self.shape[i] = 5
self.strides[0] = sizeof(int *) * 5
self.strides[1] = sizeof(int *)
self.strides[2] = sizeof(int)
self.suboffsets[0] = -1
self.suboffsets[1] = 0
self.suboffsets[2] = -1
self.format = b"i"
def __getbuffer__(self, Py_buffer *info, int flags):
info.buf = <void *> self.myarray
info.len = 5 * 5 * 5
info.ndim = 3
info.shape = self.shape
info.strides = self.strides
info.suboffsets = self.suboffsets
info.itemsize = sizeof(int)
info.readonly = 0
info.obj = self
info.format = self.format
@testcase
def test_index_slicing_away_direct_indirect():
"""
>>> test_index_slicing_away_direct_indirect()
20
20
20
20
<BLANKLINE>
20
20
20
20
All dimensions preceding dimension 1 must be indexed and not sliced
"""
cdef int[:, ::view.indirect, :] a = TestIndexSlicingDirectIndirectDims()
a_obj = a
print a[1][2][3]
print a[1, 2, 3]
print a[1, 2][3]
print a[..., 3][1, 2]
print
print a_obj[1][2][3]
print a_obj[1, 2, 3]
print a_obj[1, 2][3]
print a_obj[..., 3][1, 2]
try:
print a_obj[1:, 2][3]
except IndexError, e:
print e.args[0]
@testcase
def test_direct_slicing(arg):
......@@ -1618,3 +1724,322 @@ def test_object_indices():
for j in range(3):
print myslice[j]
cdef fused slice_1d:
object
int[:]
cdef fused slice_2d:
object
int[:, :]
@testcase
def test_ellipsis_expr():
"""
>>> test_ellipsis_expr()
8
8
"""
cdef int[10] a
cdef int[:] m = a
_test_ellipsis_expr(m)
_test_ellipsis_expr(<object> m)
cdef _test_ellipsis_expr(slice_1d m):
m[4] = 8
m[...] = m[...]
print m[4]
@testcase
def test_slice_assignment():
"""
>>> test_slice_assignment()
"""
cdef int carray[10][100]
cdef int i, j
for i in range(10):
for j in range(100):
carray[i][j] = i * 100 + j
cdef int[:, :] m = carray
cdef int[:, :] copy = m[-6:-1, 60:65].copy()
_test_slice_assignment(m, copy)
_test_slice_assignment(<object> m, <object> copy)
cdef _test_slice_assignment(slice_2d m, slice_2d copy):
cdef int i, j
m[...] = m[::-1, ::-1]
m[:, :] = m[::-1, ::-1]
m[-5:, -5:] = m[-6:-1, 60:65]
for i in range(5):
for j in range(5):
assert copy[i, j] == m[-5 + i, -5 + j], (copy[i, j], m[-5 + i, -5 + j])
@testcase
def test_slice_assignment_broadcast_leading():
"""
>>> test_slice_assignment_broadcast_leading()
"""
cdef int array1[1][10]
cdef int array2[10]
cdef int i
for i in range(10):
array1[0][i] = i
cdef int[:, :] a = array1
cdef int[:] b = array2
_test_slice_assignment_broadcast_leading(a, b)
for i in range(10):
array1[0][i] = i
_test_slice_assignment_broadcast_leading(<object> a, <object> b)
cdef _test_slice_assignment_broadcast_leading(slice_2d a, slice_1d b):
cdef int i
b[:] = a[:, :]
b = b[::-1]
a[:, :] = b[:]
for i in range(10):
assert a[0, i] == b[i] == 10 - 1 - i, (a[0, i], b[i], 10 - 1 - i)
@testcase
def test_slice_assignment_broadcast_strides():
"""
>>> test_slice_assignment_broadcast_strides()
"""
cdef int src_array[10]
cdef int dst_array[10][5]
cdef int i, j
for i in range(10):
src_array[i] = 10 - 1 - i
cdef int[:] src = src_array
cdef int[:, :] dst = dst_array
cdef int[:, :] dst_f = dst.copy_fortran()
_test_slice_assignment_broadcast_strides(src, dst, dst_f)
_test_slice_assignment_broadcast_strides(<object> src, <object> dst, <object> dst_f)
cdef _test_slice_assignment_broadcast_strides(slice_1d src, slice_2d dst, slice_2d dst_f):
cdef int i, j
dst[1:] = src[-1:-6:-1]
dst_f[1:] = src[-1:-6:-1]
for i in range(1, 10):
for j in range(1, 5):
assert dst[i, j] == dst_f[i, j] == j, (dst[i, j], dst_f[i, j], j)
# test overlapping memory with broadcasting
dst[:, 1:4] = dst[1, :3]
dst_f[:, 1:4] = dst[1, 1:4]
for i in range(10):
for j in range(1, 3):
assert dst[i, j] == dst_f[i, j] == j - 1, (dst[i, j], dst_f[i, j], j - 1)
@testcase
def test_borrowed_slice():
"""
Test the difference between borrowed an non-borrowed slices. If you delete or assign
to a slice in a cdef function, it is not borrowed.
>>> test_borrowed_slice()
5
5
5
"""
cdef int i, carray[10]
for i in range(10):
carray[i] = i
_borrowed(carray)
_not_borrowed(carray)
_not_borrowed2(carray)
cdef _borrowed(int[:] m):
print m[5]
cdef _not_borrowed(int[:] m):
print m[5]
if object():
del m
cdef _not_borrowed2(int[:] m):
cdef int[10] carray
print m[5]
if object():
m = carray
class SingleObject(object):
    """A trivial value wrapper used to observe refcounts of object-dtype items.

    Compares equal both to another SingleObject with the same value and to
    the bare value itself.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __eq__(self, other):
        # Equal if the other object's .value matches ours, or if the other
        # object *is* our value.
        other_value = getattr(other, 'value', None)
        return self.value == other_value or self.value == other
cdef _get_empty_object_slice(fill=None):
    # Return a fresh 10-element object-dtype cython.array.
    # NOTE(review): 'fill' is accepted but never used -- the array's object
    # storage is not initialized here; confirm callers rely on that.
    cdef cython.array a = cython.array((10,), sizeof(PyObject *), 'O')
    assert a.dtype_is_object
    return a
@testcase
def test_object_dtype_copying():
"""
>>> test_object_dtype_copying()
0
1
2
3
4
5
6
7
8
9
3 5
2 5
"""
cdef int i
unique = object()
unique_refcount = get_refcount(unique)
cdef object[:] m1 = _get_empty_object_slice()
cdef object[:] m2 = _get_empty_object_slice()
for i in range(10):
m1[i] = SingleObject(i)
m2[...] = m1
del m1
for i in range(10):
print m2[i]
obj = m2[5]
print get_refcount(obj), obj
del m2
print get_refcount(obj), obj
assert unique_refcount == get_refcount(unique), (unique_refcount, get_refcount(unique))
@testcase
def test_scalar_slice_assignment():
"""
>>> test_scalar_slice_assignment()
0
1
6
3
6
5
6
7
6
9
<BLANKLINE>
0
1
6
3
6
5
6
7
6
9
"""
cdef int[10] a
cdef int[:] m = a
cdef int a2[5][10]
cdef int[:, ::1] m2 = a2
_test_scalar_slice_assignment(m, m2)
print
_test_scalar_slice_assignment(<object> m, <object> m2)
cdef _test_scalar_slice_assignment(slice_1d m, slice_2d m2):
cdef int i, j
for i in range(10):
m[i] = i
m[-2:0:-2] = 6
for i in range(10):
print m[i]
for i in range(m2.shape[0]):
for j in range(m2.shape[1]):
m2[i, j] = i * m2.shape[1] + j
cdef int x = 2, y = -2
cdef long value = 1
m2[::2, ::-1] = value
m2[-2::-2, ::-1] = 2
m2[::2, -2::-2] = 0
m2[-2::-2, -2::-2] = 0
cdef int[:, :] s = m2[..., 1::2]
for i in range(s.shape[0]):
for j in range(s.shape[1]):
assert s[i, j] == i % 2 + 1, (s[i, j], i)
s = m2[::2, 1::2]
for i in range(s.shape[0]):
for j in range(s.shape[1]):
assert s[i, j] == 1, s[i, j]
s = m2[1::2, ::2]
for i in range(s.shape[0]):
for j in range(s.shape[1]):
assert s[i, j] == 0, s[i, j]
m2[...] = 3
for i in range(m2.shape[0]):
for j in range(m2.shape[1]):
assert m2[i, j] == 3, s[i, j]
@testcase
def test_contig_scalar_to_slice_assignment():
    """
    >>> test_contig_scalar_to_slice_assignment()
    14 14 14 14
    20 20 20 20
    """
    # Scalar broadcast into a C-contiguous 2D view, via both the
    # ellipsis form and the explicit full-slice form; sampled corners
    # and interior elements must all receive the scalar.
    cdef int a[5][10]
    cdef int[:, ::1] m = a
    m[...] = 14
    print m[0, 0], m[-1, -1], m[3, 2], m[4, 9]
    m[:, :] = 20
    print m[0, 0], m[-1, -1], m[3, 2], m[4, 9]
@testcase
def test_dtype_object_scalar_assignment():
    """
    >>> test_dtype_object_scalar_assignment()
    """
    # Broadcast a single Python object into every element of an
    # object-dtype memoryview, through both the typed slice and the
    # Python-level memoryview object.
    cdef object[:] m = cython.array((10,), sizeof(PyObject *), 'O')
    m[:] = SingleObject(2)
    assert m[0] == m[4] == m[-1] == 2
    (<object> m)[:] = SingleObject(3)
    assert m[0] == m[4] == m[-1] == 3
......@@ -189,24 +189,46 @@ def test_transpose():
print a[3, 2], a.T[2, 3], a_obj[3, 2], a_obj.T[2, 3], numpy_obj[3, 2], numpy_obj.T[2, 3]
@testcase_numpy_1_5
def test_numpy_like_attributes(cyarray):
    """
    For some reason this fails in numpy 1.4, with shape () and strides (40, 8)
    instead of 20, 4 on my machine. Investigate this.

    >>> cyarray = create_array(shape=(8, 5), mode="c")
    >>> test_numpy_like_attributes(cyarray)
    >>> test_numpy_like_attributes(cyarray.memview)
    """
    # Compare the cython.array / memoryview attribute protocol against
    # the numpy array produced from the same buffer.
    numarray = np.asarray(cyarray)
    # Each assert carries both sides so a failure reports actual values.
    # (Merge residue removed: the old bare asserts duplicated these and
    # the stale @testcase decorator shadowed @testcase_numpy_1_5.)
    assert cyarray.shape == numarray.shape, (cyarray.shape, numarray.shape)
    assert cyarray.strides == numarray.strides, (cyarray.strides, numarray.strides)
    assert cyarray.ndim == numarray.ndim, (cyarray.ndim, numarray.ndim)
    assert cyarray.size == numarray.size, (cyarray.size, numarray.size)
    assert cyarray.nbytes == numarray.nbytes, (cyarray.nbytes, numarray.nbytes)
    # A typed slice taken from the numpy array must keep it as its base.
    cdef int[:, :] mslice = numarray
    assert (<object> mslice).base is numarray
@testcase_numpy_1_5
def test_copy_and_contig_attributes(a):
    """
    >>> a = np.arange(20, dtype=np.int32).reshape(5, 4)
    >>> test_copy_and_contig_attributes(a)
    """
    cdef np.int32_t[:, :] mslice = a
    # Coerce the typed slice to its Python-level memoryview object so we
    # can call the object-level copy()/is_*_contig() API on it.
    m = mslice
    # copy() must preserve both values and the C-contiguous strides.
    assert np.all(a == np.array(m.copy()))
    assert a.strides == m.strides == m.copy().strides
    # copy_fortran() preserves values but yields Fortran-order strides
    # (4-byte int32 elements over a 5x4 array -> strides (4, 20)).
    assert np.all(a == np.array(m.copy_fortran()))
    assert m.copy_fortran().strides == (4, 20)
    # Contiguity flags for the original view and both kinds of copy.
    assert m.is_c_contig() and m.copy().is_c_contig()
    assert m.copy_fortran().is_f_contig() and not m.is_f_contig()
ctypedef int td_cy_int
cdef extern from "bufaccess.h":
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment