Commit 7ea143ae authored by INADA Naoki, committed by GitHub

bpo-29469: Move constant folding to AST optimizer (GH-2858)

parent b5fd9ad0
@@ -581,6 +581,9 @@ Optimizations
and :meth:`selectors.DevpollSelector.modify` may be around 10% faster under
heavy loads. (Contributed by Giampaolo Rodola' in :issue:`30014`)
+* Constant folding has been moved from the peephole optimizer to the new
+  AST optimizer.  (Contributed by Eugene Toder and INADA Naoki in
+  :issue:`29469`)
Build and C API Changes
=======================
......
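The entry above describes an internal move; the folding itself stays observable from Python with the dis module. A minimal check (the function f and the expression are my own example; exact disassembly varies by CPython version):

    import dis

    def f():
        return 2 * 6 + 1

    # The whole expression is folded at compile time, so the body
    # disassembles to a single constant load (LOAD_CONST 13, or
    # RETURN_CONST on newer releases) with no BINARY_* opcodes left.
    dis.dis(f)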
@@ -75,6 +75,8 @@ PyAPI_FUNC(PyObject*) _Py_Mangle(PyObject *p, PyObject *name);
#define PY_INVALID_STACK_EFFECT INT_MAX
PyAPI_FUNC(int) PyCompile_OpcodeStackEffect(int opcode, int oparg);
+
+PyAPI_FUNC(int) _PyAST_Optimize(struct _mod *, PyArena *arena);
#ifdef __cplusplus
}
#endif
......
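_PyAST_Optimize is private C API, but it runs on every compilation, so its effect is visible from pure Python. A small probe (the source string "x = 3 * 7" is an arbitrary example of mine):

    # compile() goes parse -> AST optimize -> symtable -> codegen,
    # so the folded value already sits in the constants table.
    code = compile("x = 3 * 7", "<demo>", "exec")
    print(21 in code.co_consts)  # expected: True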
@@ -319,6 +319,7 @@ PYTHON_OBJS= \
        Python/Python-ast.o \
        Python/asdl.o \
        Python/ast.o \
+        Python/ast_opt.o \
        Python/bltinmodule.o \
        Python/ceval.o \
        Python/compile.o \
......
Move constant folding from the bytecode layer to the AST layer.
Original patch by Eugene Toder.
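The real optimizer is C code (the new Python/ast_opt.c, collapsed below); purely as an illustration of the technique, the same bottom-up folding can be sketched with ast.NodeTransformer. The class name FoldAdd and the restriction to addition are my own simplifications, and the sketch uses the ast.Constant node of current Pythons (the 3.7-era code used Num/Str):

    import ast

    class FoldAdd(ast.NodeTransformer):
        # Illustrative only: fold `Constant + Constant` into one Constant,
        # bottom-up, abandoning the fold if the operation raises (1 + 'a').
        def visit_BinOp(self, node):
            self.generic_visit(node)  # fold children first
            if (isinstance(node.op, ast.Add)
                    and isinstance(node.left, ast.Constant)
                    and isinstance(node.right, ast.Constant)):
                try:
                    value = node.left.value + node.right.value
                except Exception:
                    return node  # leave the expression for runtime
                return ast.copy_location(ast.Constant(value), node)
            return node

    tree = FoldAdd().visit(ast.parse("y = 1 + 2"))
    print(ast.dump(tree.body[0].value))  # Constant(value=3)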
@@ -356,6 +356,7 @@
    <ClCompile Include="..\Python\_warnings.c" />
    <ClCompile Include="..\Python\asdl.c" />
    <ClCompile Include="..\Python\ast.c" />
+    <ClCompile Include="..\Python\ast_opt.c" />
    <ClCompile Include="..\Python\bltinmodule.c" />
    <ClCompile Include="..\Python\bootstrap_hash.c" />
    <ClCompile Include="..\Python\ceval.c" />
......
@@ -824,6 +824,9 @@
    <ClCompile Include="..\Python\ast.c">
      <Filter>Python</Filter>
    </ClCompile>
+    <ClCompile Include="..\Python\ast_opt.c">
+      <Filter>Python</Filter>
+    </ClCompile>
    <ClCompile Include="..\Python\bltinmodule.c">
      <Filter>Python</Filter>
    </ClCompile>
......
This diff is collapsed (the new file Python/ast_opt.c).
@@ -331,6 +331,10 @@ PyAST_CompileObject(mod_ty mod, PyObject *filename, PyCompilerFlags *flags,
    c.c_optimize = (optimize == -1) ? Py_OptimizeFlag : optimize;
    c.c_nestlevel = 0;
+
+    if (!_PyAST_Optimize(mod, arena)) {
+        goto finally;
+    }
    c.c_st = PySymtable_BuildObject(mod, filename, c.c_future);
    if (c.c_st == NULL) {
        if (!PyErr_Occurred())
......
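The call site above runs the optimizer after parsing but before symbol-table construction, which explains an observable asymmetry: ast.parse() returns the unoptimized tree, while compile(), even when handed that same tree, folds it. A small demonstration (the expression is my own example):

    import ast

    tree = ast.parse("z = 2 ** 10")
    print(type(tree.body[0].value).__name__)  # BinOp: parsing alone does not fold

    code = compile(tree, "<demo>", "exec")
    print(1024 in code.co_consts)             # expected: True -- folded in compile()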
Two source diffs could not be displayed because they are too large.
@@ -218,153 +218,6 @@ fold_tuple_on_constants(_Py_CODEUNIT *codestr, Py_ssize_t c_start,
    return copy_op_arg(codestr, c_start, LOAD_CONST, len_consts, opcode_end);
}
-/* Replace LOAD_CONST c1, LOAD_CONST c2, BINOP
-   with    LOAD_CONST binop(c1,c2)
-
-   The consts table must still be in list form so that the
-   new constant can be appended.
-
-   Called with codestr pointing to the BINOP.
-   Abandons the transformation if the folding fails (i.e. 1+'a').
-
-   If the new constant is a sequence, only folds when the size
-   is below a threshold value.  That keeps pyc files from
-   becoming large in the presence of code like: (None,)*1000.
-*/
-static Py_ssize_t
-fold_binops_on_constants(_Py_CODEUNIT *codestr, Py_ssize_t c_start,
-                         Py_ssize_t opcode_end, unsigned char opcode,
-                         PyObject *consts, PyObject **objs)
-{
-    PyObject *newconst, *v, *w;
-    Py_ssize_t len_consts, size;
-
-    /* Pre-conditions */
-    assert(PyList_CheckExact(consts));
-    len_consts = PyList_GET_SIZE(consts);
-
-    /* Create new constant */
-    v = objs[0];
-    w = objs[1];
-    switch (opcode) {
-        case BINARY_POWER:
-            newconst = PyNumber_Power(v, w, Py_None);
-            break;
-        case BINARY_MULTIPLY:
-            newconst = PyNumber_Multiply(v, w);
-            break;
-        case BINARY_TRUE_DIVIDE:
-            newconst = PyNumber_TrueDivide(v, w);
-            break;
-        case BINARY_FLOOR_DIVIDE:
-            newconst = PyNumber_FloorDivide(v, w);
-            break;
-        case BINARY_MODULO:
-            newconst = PyNumber_Remainder(v, w);
-            break;
-        case BINARY_ADD:
-            newconst = PyNumber_Add(v, w);
-            break;
-        case BINARY_SUBTRACT:
-            newconst = PyNumber_Subtract(v, w);
-            break;
-        case BINARY_SUBSCR:
-            newconst = PyObject_GetItem(v, w);
-            break;
-        case BINARY_LSHIFT:
-            newconst = PyNumber_Lshift(v, w);
-            break;
-        case BINARY_RSHIFT:
-            newconst = PyNumber_Rshift(v, w);
-            break;
-        case BINARY_AND:
-            newconst = PyNumber_And(v, w);
-            break;
-        case BINARY_XOR:
-            newconst = PyNumber_Xor(v, w);
-            break;
-        case BINARY_OR:
-            newconst = PyNumber_Or(v, w);
-            break;
-        default:
-            /* Called with an unknown opcode */
-            PyErr_Format(PyExc_SystemError,
-                         "unexpected binary operation %d on a constant",
-                         opcode);
-            return -1;
-    }
-    if (newconst == NULL) {
-        if (!PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) {
-            PyErr_Clear();
-        }
-        return -1;
-    }
-    size = PyObject_Size(newconst);
-    if (size == -1) {
-        if (PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) {
-            return -1;
-        }
-        PyErr_Clear();
-    } else if (size > 20) {
-        Py_DECREF(newconst);
-        return -1;
-    }
-
-    /* Append folded constant into consts table */
-    if (PyList_Append(consts, newconst)) {
-        Py_DECREF(newconst);
-        return -1;
-    }
-    Py_DECREF(newconst);
-
-    return copy_op_arg(codestr, c_start, LOAD_CONST, len_consts, opcode_end);
-}
-static Py_ssize_t
-fold_unaryops_on_constants(_Py_CODEUNIT *codestr, Py_ssize_t c_start,
-                           Py_ssize_t opcode_end, unsigned char opcode,
-                           PyObject *consts, PyObject *v)
-{
-    PyObject *newconst;
-    Py_ssize_t len_consts;
-
-    /* Pre-conditions */
-    assert(PyList_CheckExact(consts));
-    len_consts = PyList_GET_SIZE(consts);
-
-    /* Create new constant */
-    switch (opcode) {
-        case UNARY_NEGATIVE:
-            newconst = PyNumber_Negative(v);
-            break;
-        case UNARY_INVERT:
-            newconst = PyNumber_Invert(v);
-            break;
-        case UNARY_POSITIVE:
-            newconst = PyNumber_Positive(v);
-            break;
-        default:
-            /* Called with an unknown opcode */
-            PyErr_Format(PyExc_SystemError,
-                         "unexpected unary operation %d on a constant",
-                         opcode);
-            return -1;
-    }
-    if (newconst == NULL) {
-        if (!PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) {
-            PyErr_Clear();
-        }
-        return -1;
-    }
-
-    /* Append folded constant into consts table */
-    if (PyList_Append(consts, newconst)) {
-        Py_DECREF(newconst);
-        PyErr_Clear();
-        return -1;
-    }
-    Py_DECREF(newconst);
-
-    return copy_op_arg(codestr, c_start, LOAD_CONST, len_consts, opcode_end);
-}
static unsigned int *
markblocks(_Py_CODEUNIT *code, Py_ssize_t len)
{
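The two deleted helpers above encode two policies that the AST optimizer preserves: a failed fold is abandoned (so 1 + 'a' still raises at run time, not compile time), and oversized results are not folded, keeping .pyc files small. Both are easy to probe from Python; the exact size threshold is version-dependent, so the 1000-element case is deliberately far past any plausible limit:

    # Failed folds are abandoned, not reported: this compiles cleanly
    # and only raises when executed.
    code = compile("1 + 'a'", "<demo>", "eval")
    try:
        eval(code)
    except TypeError as exc:
        print(exc)

    # Oversized results stay unfolded so constants tables stay small.
    big = compile("(None,) * 1000", "<demo>", "eval")
    print(any(isinstance(c, tuple) and len(c) == 1000
              for c in big.co_consts))  # expected: False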
@@ -566,52 +419,6 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
            }
            break;
-            /* Fold binary ops on constants.
-               LOAD_CONST c1 LOAD_CONST c2 BINOP --> LOAD_CONST binop(c1,c2) */
-            case BINARY_POWER:
-            case BINARY_MULTIPLY:
-            case BINARY_TRUE_DIVIDE:
-            case BINARY_FLOOR_DIVIDE:
-            case BINARY_MODULO:
-            case BINARY_ADD:
-            case BINARY_SUBTRACT:
-            case BINARY_SUBSCR:
-            case BINARY_LSHIFT:
-            case BINARY_RSHIFT:
-            case BINARY_AND:
-            case BINARY_XOR:
-            case BINARY_OR:
-                if (CONST_STACK_LEN() < 2)
-                    break;
-                h = lastn_const_start(codestr, op_start, 2);
-                if (ISBASICBLOCK(blocks, h, op_start)) {
-                    h = fold_binops_on_constants(codestr, h, i + 1, opcode,
-                                                 consts, CONST_STACK_LASTN(2));
-                    if (h >= 0) {
-                        CONST_STACK_POP(2);
-                        CONST_STACK_PUSH_OP(h);
-                    }
-                }
-                break;
-
-            /* Fold unary ops on constants.
-               LOAD_CONST c1  UNARY_OP --> LOAD_CONST unary_op(c) */
-            case UNARY_NEGATIVE:
-            case UNARY_INVERT:
-            case UNARY_POSITIVE:
-                if (CONST_STACK_LEN() < 1)
-                    break;
-                h = lastn_const_start(codestr, op_start, 1);
-                if (ISBASICBLOCK(blocks, h, op_start)) {
-                    h = fold_unaryops_on_constants(codestr, h, i + 1, opcode,
-                                                   consts, *CONST_STACK_LASTN(1));
-                    if (h >= 0) {
-                        CONST_STACK_POP(1);
-                        CONST_STACK_PUSH_OP(h);
-                    }
-                }
-                break;
            /* Simplify conditional jump to conditional jump where the
               result of the first test implies the success of a similar
               test or the failure of the opposite test.
......
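The unary cases removed in the second hunk moved to the AST layer as well, where negation of an already-folded subexpression folds through in a single bottom-up pass. A quick check with dis (the expression is my own example; output details vary across versions):

    import dis

    # Both the power and the negation are folded, so no UNARY_NEGATIVE
    # or BINARY_POWER appears -- just a load of the constant -16.
    dis.dis(compile("-(2 ** 4)", "<demo>", "eval"))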