Commit 86e5e17b authored by Ezio Melotti

Merged revisions 81758-81759 via svnmerge from

svn+ssh://pythondev@svn.python.org/python/trunk

........
  r81758 | ezio.melotti | 2010-06-05 20:51:07 +0300 (Sat, 05 Jun 2010) | 15 lines

  Update PyUnicode_DecodeUTF8 from RFC 2279 to RFC 3629.

  1) #8271: when a byte sequence is invalid, only the start byte and all the
     valid continuation bytes are now replaced by U+FFFD, instead of replacing
     the number of bytes specified by the start byte.
     See http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (pages 94-95);
  2) 5- and 6-byte-long UTF-8 sequences are now considered invalid (no changes
     in behavior);
  3) Add code and tests to reject surrogates (U+D800-U+DFFF) as defined in
     RFC 3629, but leave it commented out since it's not backward compatible;
  4) Change the error messages "unexpected code byte" to "invalid start byte"
     and "invalid data" to "invalid continuation byte";
  5) Add an extensive set of tests in test_unicode;
  6) Fix test_codeccallbacks because it was failing after this change.
........
  r81759 | ezio.melotti | 2010-06-05 22:21:32 +0300 (Sat, 05 Jun 2010) | 1 line

  Add a NEWS entry for r81758 and clarify a comment.
........
parent 28fbea41
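As a quick illustration of points 1 and 4 above (a sketch, assuming an interpreter that already includes this change, such as the 2.6.6 build targeted by the NEWS entry below; the exact exception wording may vary):

    # '\xf1' starts a 4-byte sequence, but only one valid continuation byte
    # ('\x80') follows: only those two bytes are now replaced by U+FFFD, so
    # 'A' and 'B' survive.  The old code consumed the whole 4-byte span
    # claimed by '\xf1' and returned a single u'\ufffd'.
    assert '\xf1\x80AB'.decode('utf-8', 'replace') == u'\ufffdAB'

    # In strict mode the renamed error message shows up in the exception:
    try:
        '\xf1\x80AB'.decode('utf-8')
    except UnicodeDecodeError as exc:
        assert 'invalid continuation byte' in str(exc)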
@@ -153,28 +153,30 @@ class CodecCallbackTest(unittest.TestCase):
sout += "\\U%08x" % sys.maxunicode
self.assertEqual(sin.encode("iso-8859-15", "backslashreplace"), sout)
def test_decoderelaxedutf8(self):
# This is the test for a decoding callback handler,
# that relaxes the UTF-8 minimal encoding restriction.
# A null byte that is encoded as "\xc0\x80" will be
# decoded as a null byte. All other illegal sequences
# will be handled strictly.
def test_decoding_callbacks(self):
# This is a test for a decoding callback handler
# that allows the decoding of the invalid sequence
# "\xc0\x80" and returns "\x00" instead of raising an error.
# All other illegal sequences will be handled strictly.
def relaxedutf8(exc):
if not isinstance(exc, UnicodeDecodeError):
raise TypeError("don't know how to handle %r" % exc)
if exc.object[exc.start:exc.end].startswith("\xc0\x80"):
if exc.object[exc.start:exc.start+2] == "\xc0\x80":
return (u"\x00", exc.start+2) # retry after two bytes
else:
raise exc
codecs.register_error(
"test.relaxedutf8", relaxedutf8)
codecs.register_error("test.relaxedutf8", relaxedutf8)
# all the "\xc0\x80" will be decoded to "\x00"
sin = "a\x00b\xc0\x80c\xc3\xbc\xc0\x80\xc0\x80"
sout = u"a\x00b\x00c\xfc\x00\x00"
self.assertEqual(sin.decode("utf-8", "test.relaxedutf8"), sout)
# "\xc0\x81" is not valid and a UnicodeDecodeError will be raised
sin = "\xc0\x80\xc0\x81"
self.assertRaises(UnicodeError, sin.decode, "utf-8", "test.relaxedutf8")
self.assertRaises(UnicodeDecodeError, sin.decode,
"utf-8", "test.relaxedutf8")
def test_charmapencode(self):
# For charmap encodings the replacement string will be
......
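The handler in the hunk above uses the codecs error-callback protocol: a callable registered with codecs.register_error() receives the UnicodeDecodeError and returns a (replacement, resume_position) tuple, or re-raises. A minimal standalone sketch of the same idea (the registration name is only illustrative):

    import codecs

    def relaxedutf8(exc):
        if not isinstance(exc, UnicodeDecodeError):
            raise TypeError("don't know how to handle %r" % exc)
        # accept the invalid sequence "\xc0\x80" as a NUL byte
        if exc.object[exc.start:exc.start+2] == "\xc0\x80":
            return (u"\x00", exc.start + 2)  # resume after the two bytes
        raise exc

    codecs.register_error("test.relaxedutf8", relaxedutf8)
    assert "a\xc0\x80b".decode("utf-8", "test.relaxedutf8") == u"a\x00b"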
@@ -587,6 +587,164 @@ class UnicodeTest(
# * strict decoding testing for all of the
# UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_utf8_decode_valid_sequences(self):
sequences = [
# single byte
('\x00', u'\x00'), ('a', u'a'), ('\x7f', u'\x7f'),
# 2 bytes
('\xc2\x80', u'\x80'), ('\xdf\xbf', u'\u07ff'),
# 3 bytes
('\xe0\xa0\x80', u'\u0800'), ('\xed\x9f\xbf', u'\ud7ff'),
('\xee\x80\x80', u'\uE000'), ('\xef\xbf\xbf', u'\uffff'),
# 4 bytes
('\xF0\x90\x80\x80', u'\U00010000'),
('\xf4\x8f\xbf\xbf', u'\U0010FFFF')
]
for seq, res in sequences:
self.assertEqual(seq.decode('utf-8'), res)
for ch in map(unichr, range(0, sys.maxunicode)):
self.assertEqual(ch, ch.encode('utf-8').decode('utf-8'))
def test_utf8_decode_invalid_sequences(self):
# continuation bytes in a sequence of 2, 3, or 4 bytes
continuation_bytes = map(chr, range(0x80, 0xC0))
# start bytes of a 2-byte sequence equivalent to codepoints <= 0x7F
invalid_2B_seq_start_bytes = map(chr, range(0xC0, 0xC2))
# start bytes of a 4-byte sequence equivalent to codepoints > 0x10FFFF
invalid_4B_seq_start_bytes = map(chr, range(0xF5, 0xF8))
invalid_start_bytes = (
continuation_bytes + invalid_2B_seq_start_bytes +
invalid_4B_seq_start_bytes + map(chr, range(0xF7, 0x100))
)
for byte in invalid_start_bytes:
self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
for sb in invalid_2B_seq_start_bytes:
for cb in continuation_bytes:
self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
for sb in invalid_4B_seq_start_bytes:
for cb1 in continuation_bytes[:3]:
for cb3 in continuation_bytes[:3]:
self.assertRaises(UnicodeDecodeError,
(sb+cb1+'\x80'+cb3).decode, 'utf-8')
for cb in map(chr, range(0x80, 0xA0)):
self.assertRaises(UnicodeDecodeError,
('\xE0'+cb+'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
('\xE0'+cb+'\xBF').decode, 'utf-8')
# XXX: surrogates shouldn't be valid UTF-8!
# see http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
# (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt
#for cb in map(chr, range(0xA0, 0xC0)):
#sys.__stdout__.write('\\xED\\x%02x\\x80\n' % ord(cb))
#self.assertRaises(UnicodeDecodeError,
#('\xED'+cb+'\x80').decode, 'utf-8')
#self.assertRaises(UnicodeDecodeError,
#('\xED'+cb+'\xBF').decode, 'utf-8')
for cb in map(chr, range(0x80, 0x90)):
self.assertRaises(UnicodeDecodeError,
('\xF0'+cb+'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
('\xF0'+cb+'\xBF\xBF').decode, 'utf-8')
for cb in map(chr, range(0x90, 0xC0)):
self.assertRaises(UnicodeDecodeError,
('\xF4'+cb+'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
('\xF4'+cb+'\xBF\xBF').decode, 'utf-8')
def test_issue8271(self):
# Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
# only the start byte and the continuation byte(s) are now considered
# invalid, instead of the number of bytes specified by the start byte.
# See http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
# table 3-8, Row 2) for more information about the algorithm used.
FFFD = u'\ufffd'
sequences = [
# invalid start bytes
('\x80', FFFD), # continuation byte
('\x80\x80', FFFD*2), # 2 continuation bytes
('\xc0', FFFD),
('\xc0\xc0', FFFD*2),
('\xc1', FFFD),
('\xc1\xc0', FFFD*2),
('\xc0\xc1', FFFD*2),
# with start byte of a 2-byte sequence
('\xc2', FFFD), # only the start byte
('\xc2\xc2', FFFD*2), # 2 start bytes
('\xc2\xc2\xc2', FFFD*3), # 3 start bytes
('\xc2\x41', FFFD+'A'), # invalid continuation byte
# with start byte of a 3-byte sequence
('\xe1', FFFD), # only the start byte
('\xe1\xe1', FFFD*2), # 2 start bytes
('\xe1\xe1\xe1', FFFD*3), # 3 start bytes
('\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
('\xe1\x80', FFFD), # only 1 continuation byte
('\xe1\x41', FFFD+'A'), # invalid continuation byte
('\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
('\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
('\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
('\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
('\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
# with start byte of a 4-byte sequence
('\xf1', FFFD), # only the start byte
('\xf1\xf1', FFFD*2), # 2 start bytes
('\xf1\xf1\xf1', FFFD*3), # 3 start bytes
('\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
('\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
('\xf1\x80', FFFD), # only 1 continuation byte
('\xf1\x80\x80', FFFD), # only 2 continuation bytes
('\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
('\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 1 invalid
('\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
('\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cb, then a lone cb
('\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb, then 2 lone cb
('\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 1 invalid cb, a lone cb, then 'A'
('\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 invalid cb, 'A', then a lone cb
('\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
('\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
('\xf1\xf1\x80\x41', FFFD*2+'A'),
('\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
# with invalid start byte of a 4-byte sequence (rfc2279)
('\xf5', FFFD), # only the start byte
('\xf5\xf5', FFFD*2), # 2 start bytes
('\xf5\x80', FFFD*2), # only 1 continuation byte
('\xf5\x80\x80', FFFD*3), # only 2 continuation bytes
('\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
('\xf5\x80\x41', FFFD*2+'A'), # 1 valid cb and 1 invalid
('\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
('\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
# with invalid start byte of a 5-byte sequence (rfc2279)
('\xf8', FFFD), # only the start byte
('\xf8\xf8', FFFD*2), # 2 start bytes
('\xf8\x80', FFFD*2), # only one continuation byte
('\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
('\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5-byte seq, all 5 bytes present
# with invalid start byte of a 6-byte sequence (rfc2279)
('\xfc', FFFD), # only the start byte
('\xfc\xfc', FFFD*2), # 2 start bytes
('\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
('\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
# invalid start byte
('\xfe', FFFD),
('\xfe\x80\x80', FFFD*3),
# other sequences
('\xf1\x80\x41\x42\x43', u'\ufffd\x41\x42\x43'),
('\xf1\x80\xff\x42\x43', u'\ufffd\ufffd\x42\x43'),
('\xf1\x80\xc2\x81\x43', u'\ufffd\x81\x43'),
('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
]
for n, (seq, res) in enumerate(sequences):
self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((seq+'b').decode('utf-8', 'replace'), res+'b')
self.assertEqual(seq.decode('utf-8', 'ignore'),
res.replace(u'\uFFFD', ''))
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual(u"www.python.org.".encode("idna"), "www.python.org.")
......
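The last entry in the hunk above is the example from the Unicode 5.2 standard (chapter 3, table 3-8): '\xF1\x80\x80' is a truncated 4-byte sequence (one U+FFFD), '\xE1\x80' a truncated 3-byte sequence (one U+FFFD), '\xC2' a lone start byte (one U+FFFD), and each stray continuation byte ('\x80', '\x80', '\xBF') becomes its own U+FFFD. A quick check, again assuming an interpreter with this change applied:

    seq = '\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64'
    assert seq.decode('utf-8', 'replace') == u'a\ufffd\ufffd\ufffdb\ufffdc\ufffd\ufffdd'
    # 'ignore' drops the same maximal subparts instead of replacing them:
    assert seq.decode('utf-8', 'ignore') == u'abcd'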
@@ -12,6 +12,14 @@ What's New in Python 2.6.6 alpha 1?
Core and Builtins
-----------------
- Issue #8271: during the decoding of an invalid UTF-8 byte sequence, only the
start byte and the continuation byte(s) are now considered invalid, instead
of the number of bytes specified by the start byte.
E.g.: '\xf1\x80AB'.decode('utf-8', 'replace') now returns u'\ufffdAB' and
replaces with U+FFFD only the start byte ('\xf1') and the continuation byte
('\x80') even if '\xf1' is the start byte of a 4-byte sequence.
Previous versions returned a single u'\ufffd'.
- Issue #9058: Remove assertions about INT_MAX in UnicodeDecodeError.
- Issue #8941: decoding big endian UTF-32 data in UCS-2 builds could crash
......
@@ -1735,24 +1735,24 @@ PyObject *PyUnicode_EncodeUTF7(const Py_UNICODE *s,
static
char utf8_code_length[256] = {
/* Map UTF-8 encoded prefix byte to sequence length. zero means
illegal prefix. see RFC 2279 for details */
/* Map UTF-8 encoded prefix byte to sequence length. Zero means
illegal prefix. See RFC 3629 for details */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00-0F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 70-7F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 80-8F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 0, 0
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B0-BF */
0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* C0-C1 + C2-CF */
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* D0-DF */
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* E0-EF */
4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F0-F4 + F5-FF */
};
PyObject *PyUnicode_DecodeUTF8(const char *s,
@@ -1769,6 +1769,7 @@ PyObject *PyUnicode_DecodeUTF8Stateful(const char *s,
{
const char *starts = s;
int n;
int k;
Py_ssize_t startinpos;
Py_ssize_t endinpos;
Py_ssize_t outpos;
@@ -1811,7 +1812,9 @@ PyObject *PyUnicode_DecodeUTF8Stateful(const char *s,
else {
errmsg = "unexpected end of data";
startinpos = s-starts;
endinpos = size;
endinpos = startinpos+1;
for (k=1; (k < size-startinpos) && ((s[k]&0xC0) == 0x80); k++)
endinpos++;
goto utf8Error;
}
}
@@ -1819,7 +1822,7 @@ PyObject *PyUnicode_DecodeUTF8Stateful(const char *s,
switch (n) {
case 0:
errmsg = "unexpected code byte";
errmsg = "invalid start byte";
startinpos = s-starts;
endinpos = startinpos+1;
goto utf8Error;
@@ -1832,70 +1835,67 @@ PyObject *PyUnicode_DecodeUTF8Stateful(const char *s,
case 2:
if ((s[1] & 0xc0) != 0x80) {
errmsg = "invalid data";
errmsg = "invalid continuation byte";
startinpos = s-starts;
endinpos = startinpos+2;
endinpos = startinpos + 1;
goto utf8Error;
}
ch = ((s[0] & 0x1f) << 6) + (s[1] & 0x3f);
if (ch < 0x80) {
startinpos = s-starts;
endinpos = startinpos+2;
errmsg = "illegal encoding";
goto utf8Error;
}
else
*p++ = (Py_UNICODE)ch;
assert ((ch > 0x007F) && (ch <= 0x07FF));
*p++ = (Py_UNICODE)ch;
break;
case 3:
/* XXX: surrogates shouldn't be valid UTF-8!
see http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
(table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt
Uncomment the 2 lines below to make them invalid,
codepoints: d800-dfff; UTF-8: \xed\xa0\x80-\xed\xbf\xbf. */
if ((s[1] & 0xc0) != 0x80 ||
(s[2] & 0xc0) != 0x80) {
errmsg = "invalid data";
(s[2] & 0xc0) != 0x80 ||
((unsigned char)s[0] == 0xE0 &&
(unsigned char)s[1] < 0xA0)/* ||
((unsigned char)s[0] == 0xED &&
(unsigned char)s[1] > 0x9F)*/) {
errmsg = "invalid continuation byte";
startinpos = s-starts;
endinpos = startinpos+3;
endinpos = startinpos + 1;
/* If the first two bits of s[1] are 1 and 0, then the invalid
continuation byte is s[2], so increment endinpos by 1;
otherwise s[1] itself is invalid and endinpos doesn't need
to be incremented. */
if ((s[1] & 0xC0) == 0x80)
endinpos++;
goto utf8Error;
}
ch = ((s[0] & 0x0f) << 12) + ((s[1] & 0x3f) << 6) + (s[2] & 0x3f);
if (ch < 0x0800) {
/* Note: UTF-8 encodings of surrogates are considered
legal UTF-8 sequences;
XXX For wide builds (UCS-4) we should probably try
to recombine the surrogates into a single code
unit.
*/
errmsg = "illegal encoding";
startinpos = s-starts;
endinpos = startinpos+3;
goto utf8Error;
}
else
*p++ = (Py_UNICODE)ch;
assert ((ch > 0x07FF) && (ch <= 0xFFFF));
*p++ = (Py_UNICODE)ch;
break;
case 4:
if ((s[1] & 0xc0) != 0x80 ||
(s[2] & 0xc0) != 0x80 ||
(s[3] & 0xc0) != 0x80) {
errmsg = "invalid data";
(s[3] & 0xc0) != 0x80 ||
((unsigned char)s[0] == 0xF0 &&
(unsigned char)s[1] < 0x90) ||
((unsigned char)s[0] == 0xF4 &&
(unsigned char)s[1] > 0x8F)) {
errmsg = "invalid continuation byte";
startinpos = s-starts;
endinpos = startinpos+4;
endinpos = startinpos + 1;
if ((s[1] & 0xC0) == 0x80) {
endinpos++;
if ((s[2] & 0xC0) == 0x80)
endinpos++;
}
goto utf8Error;
}
ch = ((s[0] & 0x7) << 18) + ((s[1] & 0x3f) << 12) +
((s[2] & 0x3f) << 6) + (s[3] & 0x3f);
/* validate and convert to UTF-16 */
if ((ch < 0x10000) /* minimum value allowed for 4
byte encoding */
|| (ch > 0x10ffff)) /* maximum value allowed for
UTF-16 */
{
errmsg = "illegal encoding";
startinpos = s-starts;
endinpos = startinpos+4;
goto utf8Error;
}
((s[2] & 0x3f) << 6) + (s[3] & 0x3f);
assert ((ch > 0xFFFF) && (ch <= 0x10ffff));
#ifdef Py_UNICODE_WIDE
*p++ = (Py_UNICODE)ch;
#else
@@ -1911,13 +1911,6 @@ PyObject *PyUnicode_DecodeUTF8Stateful(const char *s,
*p++ = (Py_UNICODE)(0xDC00 + (ch & 0x03FF));
#endif
break;
default:
/* Other sizes are only needed for UCS-4 */
errmsg = "unsupported Unicode code range";
startinpos = s-starts;
endinpos = startinpos+n;
goto utf8Error;
}
s += n;
continue;
......
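Read together, the new utf8_code_length table and the (s[n] & 0xC0) == 0x80 continuation-byte test amount to the following classification; this is only a Python paraphrase of the C code above, not part of the patch:

    def sequence_length(b):
        # length of the sequence introduced by start byte b (an int 0-255),
        # or 0 if b can never start a valid RFC 3629 sequence
        if b <= 0x7F:
            return 1            # ASCII
        if 0xC2 <= b <= 0xDF:
            return 2            # U+0080 .. U+07FF
        if 0xE0 <= b <= 0xEF:
            return 3            # U+0800 .. U+FFFF
        if 0xF0 <= b <= 0xF4:
            return 4            # U+10000 .. U+10FFFF
        return 0                # 0x80-0xC1 and 0xF5-0xFF are never start bytes

    def is_continuation(b):
        return (b & 0xC0) == 0x80   # 0x80 .. 0xBF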