summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAmaury Forgeot d'Arc <amauryfa@gmail.com>2008-09-04 22:34:09 +0000
committerAmaury Forgeot d'Arc <amauryfa@gmail.com>2008-09-04 22:34:09 +0000
commit1b933ed50aef2c31e003a001cef8647244ebfa0e (patch)
tree1d434753b2e2fd9e1454f4bd614c73a90bab903f
parent1d6a16bf3838bfb89efdd5e338b247324d962010 (diff)
downloadcpython-git-1b933ed50aef2c31e003a001cef8647244ebfa0e.tar.gz
#3773: Check for errors around the use of PyTokenizer_FindEncoding().
reviewed by Brett Cannon.
-rw-r--r--Misc/NEWS3
-rw-r--r--Parser/tokenizer.c5
-rw-r--r--Python/import.c2
3 files changed, 9 insertions, 1 deletions
diff --git a/Misc/NEWS b/Misc/NEWS
index 0e51c80c8b..889cead8a6 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,9 @@ What's New in Python 3.0 release candidate 1
Core and Builtins
-----------------
+- Issue #3773: Added a few more checks in PyTokenizer_FindEncoding to handle
+  error conditions.
+
- Issue 3594: Fix Parser/tokenizer.c:fp_setreadl() to open the file being
tokenized by either a file path or file pointer for the benefit of
PyTokenizer_FindEncoding().
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index e4cf8e4cc5..a040696579 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1610,7 +1610,10 @@ PyTokenizer_FindEncoding(int fd)
fclose(fp);
if (tok->encoding) {
encoding = (char *)PyMem_MALLOC(strlen(tok->encoding) + 1);
- strcpy(encoding, tok->encoding);
+ if (encoding)
+ strcpy(encoding, tok->encoding);
+ else
+ PyErr_NoMemory();
}
PyTokenizer_Free(tok);
return encoding;
diff --git a/Python/import.c b/Python/import.c
index d87d7515a0..9c077fe8a6 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -2830,6 +2830,8 @@ call_find_module(char *name, PyObject *path)
memory. */
found_encoding = PyTokenizer_FindEncoding(fd);
lseek(fd, 0, 0); /* Reset position */
+ if (found_encoding == NULL && PyErr_Occurred())
+ return NULL;
encoding = (found_encoding != NULL) ? found_encoding :
(char*)PyUnicode_GetDefaultEncoding();
}