Commit cca3a3f3 authored by Antoine Pitrou

Issue #8941: decoding big endian UTF-32 data in UCS-2 builds could crash
the interpreter with characters outside the Basic Multilingual Plane
(higher than 0x10000).
parent c6660cf4
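
The tests added below exercise exactly this failure. A minimal reproducer sketch (assuming Python 2.7 on a narrow, UCS-2 Unicode build; the problematic case was big endian data decoded on a machine whose native byte order differs): before this fix the decode could under-allocate its result and crash, afterwards it must round-trip cleanly.

import codecs

# 1024 copies of U+10000 encoded as UTF-32-BE without a BOM (mirrors test_issue8941)
encoded_be = '\x00\x01\x00\x00' * 1024
decoded = codecs.utf_32_be_decode(encoded_be)[0]
assert decoded == u'\U00010000' * 1024
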
@@ -315,6 +315,16 @@ class UTF32Test(ReadTest):
         self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                           "\xff", "strict", True)
 
+    def test_issue8941(self):
+        # Issue #8941: insufficient result allocation when decoding into
+        # surrogate pairs on UCS-2 builds.
+        encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
+        self.assertEqual(u'\U00010000' * 1024,
+                         codecs.utf_32_decode(encoded_le)[0])
+        encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
+        self.assertEqual(u'\U00010000' * 1024,
+                         codecs.utf_32_decode(encoded_be)[0])
+
 
 class UTF32LETest(ReadTest):
     encoding = "utf-32-le"
@@ -348,6 +358,13 @@ class UTF32LETest(ReadTest):
         self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                           "\xff", "strict", True)
 
+    def test_issue8941(self):
+        # Issue #8941: insufficient result allocation when decoding into
+        # surrogate pairs on UCS-2 builds.
+        encoded = '\x00\x00\x01\x00' * 1024
+        self.assertEqual(u'\U00010000' * 1024,
+                         codecs.utf_32_le_decode(encoded)[0])
+
 
 class UTF32BETest(ReadTest):
     encoding = "utf-32-be"
@@ -381,6 +398,14 @@ class UTF32BETest(ReadTest):
         self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                           "\xff", "strict", True)
 
+    def test_issue8941(self):
+        # Issue #8941: insufficient result allocation when decoding into
+        # surrogate pairs on UCS-2 builds.
+        encoded = '\x00\x01\x00\x00' * 1024
+        self.assertEqual(u'\U00010000' * 1024,
+                         codecs.utf_32_be_decode(encoded)[0])
+
 
 class UTF16Test(ReadTest):
     encoding = "utf-16"
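
A note on why the tests above need extra allocation at all: on a narrow (UCS-2) build a code point outside the BMP does not fit in a single Py_UNICODE unit, so the decoder stores it as a surrogate pair, two units per character. A small illustrative sketch (assuming a Python 2 narrow build; on a wide build len(ch) would be 1):

ch = u'\U00010000'            # first code point outside the BMP
assert len(ch) == 2           # stored as two UTF-16 code units on a narrow build
assert ch == u'\ud800\udc00'  # high surrogate followed by low surrogate

Each such character therefore needs one extra slot in the result buffer, which is what the "pairs" counting in PyUnicode_DecodeUTF32Stateful (below) reserves.
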
@@ -12,6 +12,10 @@ What's New in Python 2.7 release candidate 2?
 Core and Builtins
 -----------------
 
+- Issue #8941: decoding big endian UTF-32 data in UCS-2 builds could crash
+  the interpreter with characters outside the Basic Multilingual Plane
+  (higher than 0x10000).
+
 - In the unicode/str.format(), raise a ValueError when indexes to arguments are
   too large.
@@ -2207,11 +2207,11 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
     PyUnicodeObject *unicode;
     Py_UNICODE *p;
 #ifndef Py_UNICODE_WIDE
-    int i, pairs;
+    int pairs = 0;
 #else
     const int pairs = 0;
 #endif
-    const unsigned char *q, *e;
+    const unsigned char *q, *e, *qq;
     int bo = 0; /* assume native ordering by default */
     const char *errmsg = "";
     /* Offsets from q for retrieving bytes in the right order. */
@@ -2222,23 +2222,7 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
 #endif
     PyObject *errorHandler = NULL;
     PyObject *exc = NULL;
-    /* On narrow builds we split characters outside the BMP into two
-       codepoints => count how much extra space we need. */
-#ifndef Py_UNICODE_WIDE
-    for (i = pairs = 0; i < size/4; i++)
-        if (((Py_UCS4 *)s)[i] >= 0x10000)
-            pairs++;
-#endif
-
-    /* This might be one to much, because of a BOM */
-    unicode = _PyUnicode_New((size+3)/4+pairs);
-    if (!unicode)
-        return NULL;
-    if (size == 0)
-        return (PyObject *)unicode;
-
-    /* Unpack UTF-32 encoded data */
-    p = unicode->str;
+
     q = (unsigned char *)s;
     e = q + size;
@@ -2290,6 +2274,24 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
         iorder[3] = 0;
     }
 
+    /* On narrow builds we split characters outside the BMP into two
+       codepoints => count how much extra space we need. */
+#ifndef Py_UNICODE_WIDE
+    for (qq = q; qq < e; qq += 4)
+        if (qq[iorder[2]] != 0 || qq[iorder[3]] != 0)
+            pairs++;
+#endif
+
+    /* This might be one to much, because of a BOM */
+    unicode = _PyUnicode_New((size+3)/4+pairs);
+    if (!unicode)
+        return NULL;
+    if (size == 0)
+        return (PyObject *)unicode;
+
+    /* Unpack UTF-32 encoded data */
+    p = unicode->str;
+
     while (q < e) {
         Py_UCS4 ch;
         /* remaining bytes at the end? (size should be divisible by 4) */
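
The heart of the fix is where and how those pairs are counted. The old loop ran before the byte order (and any BOM) had been determined and reinterpreted the input as native-endian Py_UCS4 values, so input in the non-native order could yield values below 0x10000, no pairs were counted, and the result buffer came out too small; writing the surrogate pairs then overran it. The new loop runs after iorder is known and simply tests the two bytes that carry bits 16-31 of each 4-byte unit, which is byte-order independent. A rough illustration (assuming Python 2 on a little-endian machine; struct is used here purely for demonstration and is not part of the patch):

import struct

unit = '\x00\x01\x00\x00'                       # U+10000 encoded as UTF-32-BE
assert struct.unpack('<I', unit)[0] == 0x100    # what the old native-order cast saw
assert struct.unpack('>I', unit)[0] == 0x10000  # the code point actually encoded
# 0x100 < 0x10000, so the old loop counted no surrogate pair for this unit;
# the new loop sees a non-zero byte in the upper half (0x01) and counts one.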