Commit 9e4c7e10 authored by Kevin Modzelewski's avatar Kevin Modzelewski

Let c api code use the PyTuple macros

Our tuple format is now the same as CPython's, so we can just enable
the fast macros again.

Little bit of trickiness since they declare their storage array to
have a size of 1; I'm not sure how important that is, but let's
play it safe and match it.
parent 66ef8e0c
......@@ -1185,7 +1185,7 @@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/
#define PySequence_Fast_ITEMS(sf) \
(PyList_Check(sf) ? (PyList_Items(sf)) \
: (PyTuple_Items(sf)))
: ((PyTupleObject *)(sf))->ob_item)
/* Return a pointer to the underlying item array for
   an object returned by PySequence_Fast */
......
......@@ -22,8 +22,6 @@ inserted in the tuple. Similarly, PyTuple_GetItem does not increment the
returned item's reference count.
*/
// Pyston change: this is not the format we're using (but maybe it should be)
#if 0
typedef struct {
PyObject_VAR_HEAD
PyObject *ob_item[1];
......@@ -33,9 +31,6 @@ typedef struct {
* the tuple is not yet visible outside the function that builds it.
*/
} PyTupleObject;
#endif
struct _PyTupleObject;
typedef struct _PyTupleObject PyTupleObject;
// Pyston change: this is no longer a static object
PyAPI_DATA(PyTypeObject*) tuple_cls;
......@@ -54,18 +49,11 @@ PyAPI_FUNC(int) _PyTuple_Resize(PyObject **, Py_ssize_t) PYSTON_NOEXCEPT;
PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...) PYSTON_NOEXCEPT;
PyAPI_FUNC(void) _PyTuple_MaybeUntrack(PyObject *) PYSTON_NOEXCEPT;
// Pyston addition:
PyAPI_FUNC(PyObject **) PyTuple_Items(PyObject *) PYSTON_NOEXCEPT;
/* Macro, trading safety for speed */
// Pyston changes: these aren't direct macros any more [they potentially could be though]
#define PyTuple_GET_ITEM(op, i) PyTuple_GetItem(op, i)
#define PyTuple_GET_SIZE(op) PyTuple_Size(op)
//#define PyTuple_GET_ITEM(op, i) (((PyTupleObject *)(op))->ob_item[i])
//#define PyTuple_GET_SIZE(op) Py_SIZE(op)
#define PyTuple_GET_ITEM(op, i) (((PyTupleObject *)(op))->ob_item[i])
#define PyTuple_GET_SIZE(op) Py_SIZE(op)
/* Macro, *only* to be used to fill in brand new tuples */
#define PyTuple_SET_ITEM(op, i, v) PyTuple_SetItem((PyObject*)op, i, v)
//#define PyTuple_SET_ITEM(op, i, v) (((PyTupleObject *)(op))->ob_item[i] = v)
#define PyTuple_SET_ITEM(op, i, v) (((PyTupleObject *)(op))->ob_item[i] = v)
PyAPI_FUNC(int) PyTuple_ClearFreeList(void) PYSTON_NOEXCEPT;
......@@ -73,4 +61,3 @@ PyAPI_FUNC(int) PyTuple_ClearFreeList(void) PYSTON_NOEXCEPT;
}
#endif
#endif /* !Py_TUPLEOBJECT_H */
......@@ -510,7 +510,7 @@ void setupSys() {
}
void setupSysEnd() {
BoxedTuple::GCVector builtin_module_names;
std::vector<Box*, StlCompatAllocator<Box*>> builtin_module_names;
for (auto& p : sys_modules_dict->d) {
builtin_module_names.push_back(p.first);
}
......
......@@ -83,7 +83,7 @@ public:
if (!param_names.takes_param_names)
return EmptyTuple;
BoxedTuple::GCVector elts;
std::vector<Box*, StlCompatAllocator<Box*>> elts;
for (auto sr : param_names.args)
elts.push_back(boxString(sr));
if (param_names.vararg.size())
......
......@@ -435,10 +435,10 @@ extern "C" Box* listSetitemSlice(BoxedList* self, BoxedSlice* slice, Box* v) {
if (v_as_seq == NULL)
throwCAPIException();
v_size = PySequence_Fast_GET_SIZE(v_as_seq);
v_size = PySequence_Fast_GET_SIZE((Box*)v_as_seq);
// If lv->size is 0, lv->elts->elts is garbage
if (v_size)
v_elts = PySequence_Fast_ITEMS(v_as_seq);
v_elts = PySequence_Fast_ITEMS((Box*)v_as_seq);
else
v_elts = NULL;
}
......
......@@ -311,7 +311,7 @@ extern "C" Box** unpackIntoArray(Box* obj, int64_t expected_size) {
return &l->elts->elts[0];
}
BoxedTuple::GCVector elts;
std::vector<Box*, StlCompatAllocator<Box*>> elts;
for (auto e : obj->pyElements()) {
elts.push_back(e);
if (elts.size() > expected_size)
......
......@@ -69,12 +69,6 @@ Box* tupleGetitemInt(BoxedTuple* self, BoxedInt* slice) {
return tupleGetitemUnboxed(self, slice->n);
}
// Pyston addition: return a pointer to the tuple's element storage
// (the analogue of CPython's ob_item array).  Asserts that 'op' really
// is a tuple; unlike PyTuple_GetItem there is no error return path.
extern "C" PyObject** PyTuple_Items(PyObject* op) noexcept {
    RELEASE_ASSERT(PyTuple_Check(op), "");
    BoxedTuple* tup = static_cast<BoxedTuple*>(op);
    return &tup->elts[0];
}
extern "C" PyObject* PyTuple_GetItem(PyObject* op, Py_ssize_t i) noexcept {
RELEASE_ASSERT(PyTuple_Check(op), "");
RELEASE_ASSERT(i >= 0, ""); // unlike tuple.__getitem__, PyTuple_GetItem doesn't do index wrapping
......
......@@ -202,7 +202,8 @@ void* BoxVar::operator new(size_t size, BoxedClass* cls, size_t nitems) {
ALLOC_STATS_VAR(cls);
assert(cls);
ASSERT(cls->tp_basicsize >= size, "%s", cls->tp_name);
// See definition of BoxedTuple for some notes on why we need this special case:
ASSERT(isSubclass(cls, tuple_cls) || cls->tp_basicsize >= size, "%s", cls->tp_name);
assert(cls->tp_itemsize > 0);
assert(cls->tp_alloc);
......@@ -2911,8 +2912,10 @@ void setupRuntime() {
object_cls->giveAttr("__base__", None);
// Not sure why CPython defines sizeof(PyTupleObject) to include one element,
// but we copy that, which means we have to subtract that extra pointer to get the tp_basicsize:
tuple_cls = new (0)
BoxedHeapClass(object_cls, &tupleGCHandler, 0, 0, sizeof(BoxedTuple), false, boxString("tuple"));
BoxedHeapClass(object_cls, &tupleGCHandler, 0, 0, sizeof(BoxedTuple) - sizeof(Box*), false, boxString("tuple"));
tuple_cls->tp_flags |= Py_TPFLAGS_TUPLE_SUBCLASS;
tuple_cls->tp_itemsize = sizeof(Box*);
tuple_cls->tp_mro = BoxedTuple::create({ tuple_cls, object_cls });
......
......@@ -554,10 +554,6 @@ public:
class BoxedTuple : public BoxVar {
public:
typedef std::vector<Box*, StlCompatAllocator<Box*>> GCVector;
DEFAULT_CLASS_VAR_SIMPLE(tuple_cls, sizeof(Box*));
static BoxedTuple* create(int64_t size) { return new (size) BoxedTuple(size); }
static BoxedTuple* create(int64_t nelts, Box** elts) {
BoxedTuple* rtn = new (nelts) BoxedTuple(nelts);
......@@ -602,6 +598,31 @@ public:
size_t size() const { return ob_size; }
// DEFAULT_CLASS_VAR_SIMPLE doesn't work because of declaring 1 element in 'elts'
// Variable-size allocator used when the class is only known at runtime
// (e.g. tuple subclasses).  Verifies the per-item size and defers to the
// generic BoxVar allocator rather than duplicating its logic.
void* operator new(size_t size, BoxedClass* cls, size_t nitems) __attribute__((visibility("default"))) {
    ALLOC_STATS_VAR(tuple_cls)
    // Every tuple-like class stores one Box* per element.
    assert(cls->tp_itemsize == sizeof(Box*));
    return BoxVar::operator new(size, cls, nitems);
}
// Fast-path allocator for the exact 'tuple' class.  DEFAULT_CLASS_VAR_SIMPLE
// can't be used because 'elts' is declared with one element (to match
// CPython's PyTupleObject layout), so this hand-inlines the allocation and
// re-checks the invariants the macro would normally guarantee.
void* operator new(size_t size, size_t nitems) __attribute__((visibility("default"))) {
    ALLOC_STATS_VAR(tuple_cls)
    // These asserts justify skipping the generic tp_alloc dispatch below.
    assert(tuple_cls->tp_alloc == PystonType_GenericAlloc);
    assert(tuple_cls->tp_itemsize == sizeof(Box*));
    assert(tuple_cls->tp_basicsize == offsetof(BoxedTuple, elts));
    assert(tuple_cls->is_pyston_class);
    assert(tuple_cls->attrs_offset == 0);

    // NOTE(review): sizeof(BoxedTuple) already includes one element slot
    // (the union declares _elts[1]), so this allocates one Box* more than
    // nitems strictly requires — presumably deliberate to mirror CPython's
    // over-by-one PyTupleObject sizing; confirm before "fixing".
    void* mem = gc_alloc(sizeof(BoxedTuple) + nitems * sizeof(Box*), gc::GCKind::PYTHON);
    assert(mem);

    // Raw gc memory: initialize the BoxVar header fields ourselves.
    BoxVar* rtn = static_cast<BoxVar*>(mem);
    rtn->cls = tuple_cls;
    rtn->ob_size = nitems;
    return rtn;
}
private:
BoxedTuple(size_t size) { memset(elts, 0, sizeof(Box*) * size); }
......@@ -614,8 +635,18 @@ private:
}
public:
Box* elts[0];
// CPython declares ob_item (their version of elts) to have 1 element. We want to
// copy that behavior so that the sizes of the objects match, but we want to also
// have a zero-length array in there since we have some extra compiler warnings turned
// on. _elts[1] will throw an error, but elts[1] will not.
union {
Box* elts[0];
Box* _elts[1];
};
};
static_assert(sizeof(BoxedTuple) == sizeof(PyTupleObject), "");
static_assert(offsetof(BoxedTuple, ob_size) == offsetof(PyTupleObject, ob_size), "");
static_assert(offsetof(BoxedTuple, elts) == offsetof(PyTupleObject, ob_item), "");
extern "C" BoxedTuple* EmptyTuple;
extern "C" BoxedString* EmptyString;
......
diff -ur PyICU-1.0.1_o/common.h PyICU-1.0.1/common.h
--- PyICU-1.0.1_o/common.h 2010-03-29 20:04:02.000000000 +0200
+++ PyICU-1.0.1/common.h 2015-05-18 22:38:10.625582065 +0200
@@ -199,9 +199,15 @@
#else
+// Pyston change:
+/*
#define parseArgs(args, types, rest...) \
_parseArgs(((PyTupleObject *)(args))->ob_item, \
((PyTupleObject *)(args))->ob_size, types, ##rest)
+*/
+#define parseArgs(args, types, rest...) \
+ _parseArgs(PyTuple_Items(args), PyTuple_Size(args), types, ##rest)
+
#define parseArg(arg, types, rest...) \
_parseArgs(&(arg), 1, types, ##rest)
diff -ur PyICU-1.0.1_o/_icu.cpp PyICU-1.0.1/_icu.cpp
--- PyICU-1.0.1_o/_icu.cpp 2010-04-02 00:12:54.000000000 +0200
+++ PyICU-1.0.1/_icu.cpp 2015-05-19 15:26:15.131375981 +0200
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment