This patch consists of the following commits from the 2.7 branch:
98a4dcefbbc3bce5ab07e7c0830a183157250259
61599b050c621386a3fc6bc480359e2d3bb93de
2b578479b96aa3deeeb8bac313a02b5cf3cb1aff
507bd8cde60ced74d13a1ffa883bb9b0e73c38be (not part of security fix, but dependent)
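
The behaviour enforced by the combined commits can be replayed in an interpreter.
The sketch below assumes a Python 2.7 build with this patch applied and simply
repeats the cases added to Lib/test/test_urlparse.py below (bpo-36742).

  # Minimal sketch, assuming a patched Python 2.7 interpreter.
  import urlparse

  # A port separator that existed before decomposition is still accepted.
  urlparse.urlsplit(u'http://\u30d5\u309a:80')

  # U+FE13 normalizes to ':' under NFKC and would smuggle in a separator,
  # so the patched urlsplit() raises ValueError.
  try:
      urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
  except ValueError as exc:
      print "rejected: %s" % exc

  # The error message is built with %r (repr) of the netloc, so it stays
  # ASCII even when the netloc itself is not.
  try:
      urlparse.urlsplit(u'http://example.com\uFF03@bing.com')
  except ValueError as exc:
      print "rejected: %s" % exc
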
diff -Naur python2.7-2.7.16.orig/Lib/test/test_urlparse.py python2.7-2.7.16/Lib/test/test_urlparse.py
--- python2.7-2.7.16.orig/Lib/test/test_urlparse.py 2019-10-09 17:52:19.875053907 +0200
+++ python2.7-2.7.16/Lib/test/test_urlparse.py 2019-10-09 17:55:02.936834540 +0200
@@ -641,12 +641,29 @@
         self.assertIn(u'\u2100', denorm_chars)
         self.assertIn(u'\uFF03', denorm_chars)
 
+        # bpo-36742: Verify port separators are ignored when they
+        # existed prior to decomposition
+        urlparse.urlsplit(u'http://\u30d5\u309a:80')
+        with self.assertRaises(ValueError):
+            urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
+
         for scheme in [u"http", u"https", u"ftp"]:
-            for c in denorm_chars:
-                url = u"{}://netloc{}false.netloc/path".format(scheme, c)
-                print "Checking %r" % url
-                with self.assertRaises(ValueError):
-                    urlparse.urlsplit(url)
+            for netloc in [u"netloc{}false.netloc", u"n{}user@netloc"]:
+                for c in denorm_chars:
+                    url = u"{}://{}/path".format(scheme, netloc.format(c))
+                    if test_support.verbose:
+                        print "Checking %r" % url
+                    with self.assertRaises(ValueError):
+                        urlparse.urlsplit(url)
+
+        # check error message: invalid netloc must be formated with repr()
+        # to get an ASCII error message
+        with self.assertRaises(ValueError) as cm:
+            urlparse.urlsplit(u'http://example.com\uFF03@bing.com')
+        self.assertEqual(str(cm.exception),
+                         "netloc u'example.com\\uff03@bing.com' contains invalid characters "
+                         "under NFKC normalization")
+        self.assertIsInstance(cm.exception.args[0], str)
 
 def test_main():
     test_support.run_unittest(UrlParseTestCase)
diff -Naur python2.7-2.7.16.orig/Lib/urlparse.py python2.7-2.7.16/Lib/urlparse.py
--- python2.7-2.7.16.orig/Lib/urlparse.py 2019-10-09 17:52:19.875053907 +0200
+++ python2.7-2.7.16/Lib/urlparse.py 2019-10-09 17:55:02.936834540 +0200
@@ -171,14 +171,18 @@
     # looking for characters like \u2100 that expand to 'a/c'
     # IDNA uses NFKC equivalence, so normalize for this check
     import unicodedata
-    netloc2 = unicodedata.normalize('NFKC', netloc)
-    if netloc == netloc2:
+    n = netloc.replace(u'@', u'') # ignore characters already included
+    n = n.replace(u':', u'')      # but not the surrounding text
+    n = n.replace(u'#', u'')
+    n = n.replace(u'?', u'')
+    netloc2 = unicodedata.normalize('NFKC', n)
+    if n == netloc2:
         return
-    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
     for c in '/?#@:':
         if c in netloc2:
-            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
-                             "characters under NFKC normalization")
+            raise ValueError("netloc %r contains invalid characters "
+                             "under NFKC normalization"
+                             % netloc)
 
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL into 5 components: