cpython · commit 0d441119
authored Nov 14, 2015 by Serhiy Storchaka
Issue #25388: Fixed tokenizer crash when processing undecodable source code
with a null byte.
parent 806fb254

Showing 3 changed files with 19 additions and 8 deletions:
Lib/test/test_compile.py   +10  -0
Misc/NEWS                   +3  -0
Parser/tokenizer.c          +6  -8
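The crash is triggered by running the interpreter on a source file whose bytes contain a null byte followed by text that cannot be decoded as UTF-8. The following is a minimal standalone sketch of the scenario covered by the new regression test; the interpreter name "python3" is an assumption, and a build without this fix may crash in the tokenizer rather than report a SyntaxError.

import os
import subprocess
import tempfile

# Same bytes as the new test: a null byte, then a byte that is invalid UTF-8.
src = b"#\x00\n#\xfd\n"

with tempfile.TemporaryDirectory() as tmpd:
    fn = os.path.join(tmpd, "bad.py")
    with open(fn, "wb") as fp:
        fp.write(src)
    # "python3" is assumed to be the interpreter under test.
    res = subprocess.run(["python3", fn], capture_output=True)

# A fixed interpreter exits with a SyntaxError mentioning "Non-UTF-8";
# an unfixed one could crash in the tokenizer instead.
print(res.returncode)
print(res.stderr.decode("ascii", "replace"))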
Lib/test/test_compile.py

@@ -504,6 +504,16 @@ if 1:
             res = script_helper.run_python_until_end(fn)[0]
         self.assertIn(b"Non-UTF-8", res.err)
 
+    def test_yet_more_evil_still_undecodable(self):
+        # Issue #25388
+        src = b"#\x00\n#\xfd\n"
+        with tempfile.TemporaryDirectory() as tmpd:
+            fn = os.path.join(tmpd, "bad.py")
+            with open(fn, "wb") as fp:
+                fp.write(src)
+            res = script_helper.run_python_until_end(fn)[0]
+        self.assertIn(b"Non-UTF-8", res.err)
+
     @support.cpython_only
     def test_compiler_recursion_limit(self):
         # Expected limit is sys.getrecursionlimit() * the scaling factor
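The new test follows the pattern of the existing *_undecodable tests above it: write the bytes to a temporary file, run a child interpreter on it with script_helper.run_python_until_end(), and assert that stderr mentions "Non-UTF-8" rather than the child dying in the tokenizer. From a built checkout it should be runnable with something like ./python -m test test_compile (the exact invocation depends on the checkout).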
Misc/NEWS

@@ -10,6 +10,9 @@ Release date: tba
 Core and Builtins
 -----------------
 
+- Issue #25388: Fixed tokenizer crash when processing undecodable source code
+  with a null byte.
+
 - Issue #22995: Default implementation of __reduce__ and __reduce_ex__ now
   rejects builtin types with not defined __new__.
Parser/tokenizer.c

The fix makes error_ret() reset every buffer pointer (not just tok->buf) after freeing the buffer and record E_DECODE; tok_nextc() then preserves that error instead of overwriting it with E_EOF, rewires cur/line_start when the buffer is reallocated, and drops a NULL check that PyBytes_AS_STRING() can never trigger.

@@ -187,7 +187,8 @@ error_ret(struct tok_state *tok) /* XXX */
     tok->decoding_erred = 1;
     if (tok->fp != NULL && tok->buf != NULL) /* see PyTokenizer_Free */
         PyMem_FREE(tok->buf);
-    tok->buf = NULL;
+    tok->buf = tok->cur = tok->end = tok->inp = tok->start = NULL;
+    tok->done = E_DECODE;
     return NULL;                /* as if it were EOF */
 }
 
@@ -943,11 +944,6 @@ tok_nextc(struct tok_state *tok)
         }
         buflen = PyBytes_GET_SIZE(u);
         buf = PyBytes_AS_STRING(u);
-        if (!buf) {
-            Py_DECREF(u);
-            tok->done = E_DECODE;
-            return EOF;
-        }
         newtok = PyMem_MALLOC(buflen+1);
         strcpy(newtok, buf);
         Py_DECREF(u);

@@ -989,7 +985,6 @@ tok_nextc(struct tok_state *tok)
             if (tok->buf != NULL)
                 PyMem_FREE(tok->buf);
             tok->buf = newtok;
-            tok->line_start = tok->buf;
             tok->cur = tok->buf;
             tok->line_start = tok->buf;
             tok->inp = strchr(tok->buf, '\0');

@@ -1012,7 +1007,8 @@ tok_nextc(struct tok_state *tok)
         }
         if (decoding_fgets(tok->buf, (int)(tok->end - tok->buf),
                   tok) == NULL) {
-            tok->done = E_EOF;
+            if (!tok->decoding_erred)
+                tok->done = E_EOF;
             done = 1;
         }
         else {

@@ -1046,6 +1042,8 @@ tok_nextc(struct tok_state *tok)
                     return EOF;
                 }
                 tok->buf = newbuf;
+                tok->cur = tok->buf + cur;
+                tok->line_start = tok->cur;
                 tok->inp = tok->buf + curvalid;
                 tok->end = tok->buf + newsize;
                 tok->start = curstart < 0 ? NULL :