  1. """
  2. This module contains general purpose URL functions not found in the standard
  3. library.
  4. """
  5. import base64
  6. import codecs
  7. import os
  8. import re
  9. import posixpath
  10. import warnings
  11. import string
  12. from collections import namedtuple
  13. import six
  14. from six.moves.urllib.parse import (urljoin, urlsplit, urlunsplit,
  15. urldefrag, urlencode, urlparse,
  16. quote, parse_qs, parse_qsl,
  17. ParseResult, unquote, urlunparse)
  18. from six.moves.urllib.request import pathname2url, url2pathname
  19. from w3lib.util import to_bytes, to_native_str, to_unicode


# error handling function for bytes-to-Unicode decoding errors with URLs
def _quote_byte(error):
    return (to_unicode(quote(error.object[error.start:error.end])), error.end)

codecs.register_error('percentencode', _quote_byte)


# constants from RFC 3986, Section 2.2 and 2.3
RFC3986_GEN_DELIMS = b':/?#[]@'
RFC3986_SUB_DELIMS = b"!$&'()*+,;="
RFC3986_RESERVED = RFC3986_GEN_DELIMS + RFC3986_SUB_DELIMS
RFC3986_UNRESERVED = (string.ascii_letters + string.digits + "-._~").encode('ascii')
EXTRA_SAFE_CHARS = b'|'  # see https://github.com/scrapy/w3lib/pull/25

_safe_chars = RFC3986_RESERVED + RFC3986_UNRESERVED + EXTRA_SAFE_CHARS + b'%'


def safe_url_string(url, encoding='utf8', path_encoding='utf8'):
    """Convert the given URL into a legal URL by escaping unsafe characters
    according to RFC-3986.

    If a bytes URL is given, it is first converted to `str` using the given
    encoding (which defaults to 'utf-8'). 'utf-8' encoding is used for the
    URL path component (unless overridden by `path_encoding`), and the given
    encoding is used for the query string or form data.

    When passing an encoding, you should use the encoding of the
    original page (the page from which the URL was extracted).

    Calling this function on an already "safe" URL will return the URL
    unmodified.

    Always returns a native `str` (bytes in Python 2, unicode in Python 3).
    """
    # Python 3's urlsplit() chokes on bytes input with non-ASCII chars,
    # so let's decode (to Unicode) using the page encoding:
    #   - it is assumed that a raw bytes input comes from a document
    #     encoded with the supplied encoding (or UTF-8 by default)
    #   - if the supplied (or default) encoding chokes,
    #     percent-encode the offending bytes
    parts = urlsplit(to_unicode(url, encoding=encoding,
                                errors='percentencode'))

    # IDNA encoding can fail for too long labels (>63 characters)
    # or missing labels (e.g. http://.example.com)
    try:
        netloc = parts.netloc.encode('idna')
    except UnicodeError:
        netloc = parts.netloc

    # quote()'s return type in Python 2 follows its input type;
    # quote() in Python 3 always returns Unicode (native str)
    return urlunsplit((
        to_native_str(parts.scheme),
        to_native_str(netloc).rstrip(':'),

        # default encoding for path component SHOULD be UTF-8
        quote(to_bytes(parts.path, path_encoding), _safe_chars),

        # encoding of query and fragment follows page encoding
        # or form-charset (if known and passed)
        quote(to_bytes(parts.query, encoding), _safe_chars),
        quote(to_bytes(parts.fragment, encoding), _safe_chars),
    ))
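
# Illustrative usage (editor's sketch, not part of the original module):
# with the default UTF-8 encodings, non-ASCII characters get percent-encoded
# and already-safe URLs pass through unchanged.
#
#   >>> safe_url_string(u'http://www.example.com/\xa3')
#   'http://www.example.com/%C2%A3'
#   >>> safe_url_string('http://www.example.com/%C2%A3')
#   'http://www.example.com/%C2%A3'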


_parent_dirs = re.compile(r'/?(\.\./)+')


def safe_download_url(url):
    """Make a URL usable for download: call ``safe_url_string`` on it,
    strip the fragment (if one exists), and normalise the path.

    If the path is outside the document root, it is changed
    to be within the document root.
    """
    safe_url = safe_url_string(url)
    scheme, netloc, path, query, _ = urlsplit(safe_url)
    if path:
        path = _parent_dirs.sub('', posixpath.normpath(path))
        if url.endswith('/') and not path.endswith('/'):
            path += '/'
    else:
        path = '/'
    return urlunsplit((scheme, netloc, path, query, ''))
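
# Illustrative usage (editor's sketch, not part of the original module):
# the fragment is stripped and parent-directory segments are normalised away.
#
#   >>> safe_download_url('http://www.example.org/path/../file.html#frag')
#   'http://www.example.org/file.html'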


def is_url(text):
    return text.partition("://")[0] in ('file', 'http', 'https')
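
# Illustrative usage (editor's sketch, not part of the original module):
# only the file, http and https schemes are recognised.
#
#   >>> is_url('http://www.example.org')
#   True
#   >>> is_url('www.example.org')
#   False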


def url_query_parameter(url, parameter, default=None, keep_blank_values=0):
    """Return the value of a url parameter, given the url and parameter name

    General case:

    >>> import w3lib.url
    >>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "id")
    '200'
    >>>

    Return a default value if the parameter is not found:

    >>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault")
    'mydefault'
    >>>

    Returns None if `keep_blank_values` not set or 0 (default):

    >>> w3lib.url.url_query_parameter("product.html?id=", "id")
    >>>

    Returns an empty string if `keep_blank_values` set to 1:

    >>> w3lib.url.url_query_parameter("product.html?id=", "id", keep_blank_values=1)
    ''

    """
    queryparams = parse_qs(
        urlsplit(str(url))[3],
        keep_blank_values=keep_blank_values
    )
    return queryparams.get(parameter, [default])[0]


def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=',
                      remove=False, unique=True, keep_fragments=False):
    """Clean URL arguments, leaving only those passed in ``parameterlist``
    and keeping their order.

    >>> import w3lib.url
    >>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ('id',))
    'product.html?id=200'
    >>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id', 'name'])
    'product.html?id=200&name=wired'
    >>>

    If `unique` is ``False``, do not remove duplicated keys:

    >>> w3lib.url.url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ['d'], unique=False)
    'product.html?d=1&d=2&d=3'
    >>>

    If `remove` is ``True``, leave only those **not in parameterlist**:

    >>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id'], remove=True)
    'product.html?foo=bar&name=wired'
    >>> w3lib.url.url_query_cleaner("product.html?id=2&foo=bar&name=wired", ['id', 'foo'], remove=True)
    'product.html?name=wired'
    >>>

    By default, URL fragments are removed. If you need to preserve fragments,
    pass the ``keep_fragments`` argument as ``True``.

    >>> w3lib.url.url_query_cleaner('http://domain.tld/?bla=123#123123', ['bla'], remove=True, keep_fragments=True)
    'http://domain.tld/#123123'

    """
    if isinstance(parameterlist, (six.text_type, bytes)):
        parameterlist = [parameterlist]
    url, fragment = urldefrag(url)
    base, _, query = url.partition('?')
    seen = set()
    querylist = []
    for ksv in query.split(sep):
        k, _, _ = ksv.partition(kvsep)
        if unique and k in seen:
            continue
        elif remove and k in parameterlist:
            continue
        elif not remove and k not in parameterlist:
            continue
        else:
            querylist.append(ksv)
            seen.add(k)
    url = '?'.join([base, sep.join(querylist)]) if querylist else base
    if keep_fragments:
        url += '#' + fragment
    return url


def add_or_replace_parameter(url, name, new_value):
    """Add or replace a parameter in the given url

    >>> import w3lib.url
    >>> w3lib.url.add_or_replace_parameter('http://www.example.com/index.php', 'arg', 'v')
    'http://www.example.com/index.php?arg=v'
    >>> w3lib.url.add_or_replace_parameter('http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3', 'arg4', 'v4')
    'http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3&arg4=v4'
    >>> w3lib.url.add_or_replace_parameter('http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3', 'arg3', 'v3new')
    'http://www.example.com/index.php?arg1=v1&arg2=v2&arg3=v3new'
    >>>

    """
    parsed = urlsplit(url)
    args = parse_qsl(parsed.query, keep_blank_values=True)

    new_args = []
    found = False
    for name_, value_ in args:
        if name_ == name:
            new_args.append((name_, new_value))
            found = True
        else:
            new_args.append((name_, value_))

    if not found:
        new_args.append((name, new_value))

    query = urlencode(new_args)
    return urlunsplit(parsed._replace(query=query))


def path_to_file_uri(path):
    """Convert a local filesystem path to a legal file URI as described in:
    http://en.wikipedia.org/wiki/File_URI_scheme
    """
    x = pathname2url(os.path.abspath(path))
    if os.name == 'nt':
        x = x.replace('|', ':')  # http://bugs.python.org/issue5861
    return 'file:///%s' % x.lstrip('/')
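
# Illustrative usage (editor's sketch, not part of the original module;
# the exact result is platform-dependent, a POSIX result is shown):
#
#   >>> path_to_file_uri('/tmp/test.txt')  # doctest: +SKIP
#   'file:///tmp/test.txt'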


def file_uri_to_path(uri):
    """Convert a file URI to a local filesystem path according to:
    http://en.wikipedia.org/wiki/File_URI_scheme
    """
    uri_path = urlparse(uri).path
    return url2pathname(uri_path)
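
# Illustrative usage (editor's sketch, not part of the original module;
# POSIX result shown):
#
#   >>> file_uri_to_path('file:///tmp/test.txt')  # doctest: +SKIP
#   '/tmp/test.txt'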


def any_to_uri(uri_or_path):
    """If given a path name, return its file URI, otherwise return it
    unmodified
    """
    if os.path.splitdrive(uri_or_path)[0]:
        return path_to_file_uri(uri_or_path)
    u = urlparse(uri_or_path)
    return uri_or_path if u.scheme else path_to_file_uri(uri_or_path)
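
# Illustrative usage (editor's sketch, not part of the original module):
# anything that already has a scheme passes through, bare paths are
# converted (POSIX result shown).
#
#   >>> any_to_uri('http://www.example.org/some/page.html')
#   'http://www.example.org/some/page.html'
#   >>> any_to_uri('/tmp/test.txt')  # doctest: +SKIP
#   'file:///tmp/test.txt'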


# ASCII characters.
_char = set(map(chr, range(127)))

# RFC 2045 token.
_token = r'[{}]+'.format(re.escape(''.join(_char -
                                           # Control characters.
                                           set(map(chr, range(0, 32))) -
                                           # tspecials and space.
                                           set('()<>@,;:\\"/[]?= '))))

# RFC 822 quoted-string, without surrounding quotation marks.
_quoted_string = r'(?:[{}]|(?:\\[{}]))*'.format(
    re.escape(''.join(_char - {'"', '\\', '\r'})),
    re.escape(''.join(_char))
)

# Encode the regular expression strings to make them into bytes, as Python 3
# bytes have no format() method, but bytes must be passed to re.compile() in
# order to make a pattern object that can be used to match on bytes.

# RFC 2397 mediatype.
_mediatype_pattern = re.compile(
    r'{token}/{token}'.format(token=_token).encode()
)
_mediatype_parameter_pattern = re.compile(
    r';({token})=(?:({token})|"({quoted})")'.format(token=_token,
                                                    quoted=_quoted_string).encode()
)

_ParseDataURIResult = namedtuple("ParseDataURIResult",
                                 "media_type media_type_parameters data")


def parse_data_uri(uri):
    """
    Parse a data: URI, returning a 3-tuple of media type, dictionary of media
    type parameters, and data.
    """
    if not isinstance(uri, bytes):
        uri = safe_url_string(uri).encode('ascii')

    try:
        scheme, uri = uri.split(b':', 1)
    except ValueError:
        raise ValueError("invalid URI")
    if scheme.lower() != b'data':
        raise ValueError("not a data URI")

    # RFC 3986 section 2.1 allows percent encoding to escape characters that
    # would be interpreted as delimiters, implying that actual delimiters
    # should not be percent-encoded.
    # Decoding before parsing will allow malformed URIs with percent-encoded
    # delimiters, but it makes parsing easier and should not affect
    # well-formed URIs, as the delimiters used in this URI scheme are not
    # allowed, percent-encoded or not, in tokens.
    if six.PY2:
        uri = unquote(uri)
    else:
        uri = unquote_to_bytes(uri)

    media_type = "text/plain"
    media_type_params = {}

    m = _mediatype_pattern.match(uri)
    if m:
        media_type = m.group().decode()
        uri = uri[m.end():]
    else:
        media_type_params['charset'] = "US-ASCII"

    while True:
        m = _mediatype_parameter_pattern.match(uri)
        if m:
            attribute, value, value_quoted = m.groups()
            if value_quoted:
                value = re.sub(br'\\(.)', br'\1', value_quoted)
            media_type_params[attribute.decode()] = value.decode()
            uri = uri[m.end():]
        else:
            break

    try:
        is_base64, data = uri.split(b',', 1)
    except ValueError:
        raise ValueError("invalid data URI")
    if is_base64:
        if is_base64 != b";base64":
            raise ValueError("invalid data URI")
        data = base64.b64decode(data)

    return _ParseDataURIResult(media_type, media_type_params, data)
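
# Illustrative usage (editor's sketch, not part of the original module),
# using the RFC 2397 example URI plus a base64 variant; Python 3 output shown:
#
#   >>> parse_data_uri(u'data:,A%20brief%20note')  # doctest: +SKIP
#   ParseDataURIResult(media_type='text/plain', media_type_parameters={'charset': 'US-ASCII'}, data=b'A brief note')
#   >>> parse_data_uri(u'data:text/plain;base64,SGVsbG8=').data  # doctest: +SKIP
#   b'Hello'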


__all__ = ["add_or_replace_parameter",
           "any_to_uri",
           "canonicalize_url",
           "file_uri_to_path",
           "is_url",
           "parse_data_uri",
           "path_to_file_uri",
           "safe_download_url",
           "safe_url_string",
           "url_query_cleaner",
           "url_query_parameter",

           # this last one is deprecated; include it to be on the safe side
           "urljoin_rfc"]


def _safe_ParseResult(parts, encoding='utf8', path_encoding='utf8'):
    # IDNA encoding can fail for too long labels (>63 characters)
    # or missing labels (e.g. http://.example.com)
    try:
        netloc = parts.netloc.encode('idna')
    except UnicodeError:
        netloc = parts.netloc

    return (
        to_native_str(parts.scheme),
        to_native_str(netloc),

        # default encoding for path component SHOULD be UTF-8
        quote(to_bytes(parts.path, path_encoding), _safe_chars),
        quote(to_bytes(parts.params, path_encoding), _safe_chars),

        # encoding of query and fragment follows page encoding
        # or form-charset (if known and passed)
        quote(to_bytes(parts.query, encoding), _safe_chars),
        quote(to_bytes(parts.fragment, encoding), _safe_chars)
    )


def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
                     encoding=None):
    r"""Canonicalize the given url by applying the following procedures:

    - sort query arguments, first by key, then by value
    - percent-encode paths; non-ASCII characters are percent-encoded
      using UTF-8 (RFC-3986)
    - percent-encode query arguments; non-ASCII characters are percent-encoded
      using the passed `encoding` (UTF-8 by default)
    - normalize all spaces (in query arguments) to '+' (plus symbol)
    - normalize percent encodings case (%2f -> %2F)
    - remove query arguments with blank values (unless `keep_blank_values` is True)
    - remove fragments (unless `keep_fragments` is True)

    The url passed can be bytes or unicode, while the url returned is
    always a native str (bytes in Python 2, unicode in Python 3).

    >>> import w3lib.url
    >>>
    >>> # sorting query arguments
    >>> w3lib.url.canonicalize_url('http://www.example.com/do?c=3&b=5&b=2&a=50')
    'http://www.example.com/do?a=50&b=2&b=5&c=3'
    >>>
    >>> # UTF-8 conversion + percent-encoding of non-ASCII characters
    >>> w3lib.url.canonicalize_url(u'http://www.example.com/r\u00e9sum\u00e9')
    'http://www.example.com/r%C3%A9sum%C3%A9'
    >>>

    For more examples, see the tests in `tests/test_url.py`.
    """
    # If the supplied `encoding` is not compatible with all characters in
    # `url`, fall back to UTF-8 as a safety net.
    # UTF-8 can handle all Unicode characters,
    # so we should be covered regarding URL normalization,
    # if not for the proper URL expected by the remote website.
    try:
        scheme, netloc, path, params, query, fragment = _safe_ParseResult(
            parse_url(url), encoding=encoding)
    except UnicodeEncodeError:
        scheme, netloc, path, params, query, fragment = _safe_ParseResult(
            parse_url(url), encoding='utf8')

    # 1. decode query-string as UTF-8 (or keep raw bytes),
    #    sort values,
    #    and percent-encode them back
    if six.PY2:
        keyvals = parse_qsl(query, keep_blank_values)
    else:
        # Python 3's urllib.parse.parse_qsl does not work as wanted
        # for percent-encoded characters that do not match the passed
        # encoding; they get lost.
        #
        # e.g., 'q=b%a3' becomes [('q', 'b\ufffd')]
        # (i.e. with 'REPLACEMENT CHARACTER' (U+FFFD),
        # instead of the \xa3 that you get with Python 2's parse_qsl)
        #
        # what we want here is to keep raw bytes, and percent-encode them
        # so as to preserve whatever encoding was originally used.
        #
        # See https://tools.ietf.org/html/rfc3987#section-6.4:
        #
        #   For example, it is possible to have a URI reference of
        #   "http://www.example.org/r%E9sum%E9.xml#r%C3%A9sum%C3%A9", where the
        #   document name is encoded in iso-8859-1 based on server settings, but
        #   where the fragment identifier is encoded in UTF-8 according to
        #   [XPointer]. The IRI corresponding to the above URI would be (in XML
        #   notation)
        #   "http://www.example.org/r%E9sum%E9.xml#r&#xE9;sum&#xE9;".
        #   Similar considerations apply to query parts. The functionality of
        #   IRIs (namely, to be able to include non-ASCII characters) can only be
        #   used if the query part is encoded in UTF-8.
        keyvals = parse_qsl_to_bytes(query, keep_blank_values)

    keyvals.sort()
    query = urlencode(keyvals)

    # 2. decode percent-encoded sequences in path as UTF-8 (or keep raw bytes)
    #    and percent-encode path again (this normalizes to upper-case %XX)
    uqp = _unquotepath(path)
    path = quote(uqp, _safe_chars) or '/'

    fragment = '' if not keep_fragments else fragment

    # every part should be safe already
    return urlunparse((scheme,
                       netloc.lower().rstrip(':'),
                       path,
                       params,
                       query,
                       fragment))


def _unquotepath(path):
    for reserved in ('2f', '2F', '3f', '3F'):
        path = path.replace('%' + reserved, '%25' + reserved.upper())

    if six.PY2:
        # in Python 2, '%a3' becomes '\xa3', which is what we want
        return unquote(path)
    else:
        # in Python 3,
        # the standard lib's unquote() does not work for non-UTF-8
        # percent-escaped characters; they get lost.
        # e.g., '%a3' becomes 'REPLACEMENT CHARACTER' (U+FFFD)
        #
        # unquote_to_bytes() returns raw bytes instead
        return unquote_to_bytes(path)


def parse_url(url, encoding=None):
    """Return urlparsed url from the given argument (which could be an already
    parsed url)
    """
    if isinstance(url, ParseResult):
        return url
    return urlparse(to_unicode(url, encoding))
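
# Illustrative usage (editor's sketch, not part of the original module):
# strings are parsed, already-parsed results are returned as-is.
#
#   >>> parse_url('http://www.example.com/index.html').netloc  # doctest: +SKIP
#   'www.example.com'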


if not six.PY2:
    from urllib.parse import _coerce_args, unquote_to_bytes

    def parse_qsl_to_bytes(qs, keep_blank_values=False):
        """Parse a query given as a string argument.

        Data are returned as a list of name, value pairs as bytes.

        Arguments:

        qs: percent-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            percent-encoded queries should be treated as blank strings. A
            true value indicates that blanks should be retained as blank
            strings. The default false value indicates that blank values
            are to be ignored and treated as if they were not included.

        """
        # This code is the same as Python 3's parse_qsl()
        # (at https://hg.python.org/cpython/rev/c38ac7ab8d9a)
        # except for the unquote(s, encoding, errors) calls replaced
        # with unquote_to_bytes(s)
        qs, _coerce_result = _coerce_args(qs)
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = unquote_to_bytes(name)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = unquote_to_bytes(value)
                value = _coerce_result(value)
                r.append((name, value))
        return r
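
# Illustrative usage (editor's sketch, not part of the original module;
# Python 3 only): raw bytes survive even when the percent-escape is not
# valid UTF-8, unlike with the stdlib's parse_qsl.
#
#   >>> parse_qsl_to_bytes('q=b%a3')  # doctest: +SKIP
#   [(b'q', b'b\xa3')]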


def urljoin_rfc(base, ref, encoding='utf-8'):
    r"""
    .. warning::

        This function is deprecated and will be removed in a future release.
        It is not supported with Python 3.
        Please use ``urlparse.urljoin`` instead.

    Same as urlparse.urljoin but supports unicode values in base and ref
    parameters (in which case they will be converted to str using the given
    encoding).

    Always returns a str.

    >>> import w3lib.url
    >>> w3lib.url.urljoin_rfc('http://www.example.com/path/index.html', u'/otherpath/index2.html')
    'http://www.example.com/otherpath/index2.html'
    >>>

    >>> # Note: the following does not work in Python 3
    >>> w3lib.url.urljoin_rfc(b'http://www.example.com/path/index.html', u'fran\u00e7ais/d\u00e9part.htm') # doctest: +SKIP
    'http://www.example.com/path/fran\xc3\xa7ais/d\xc3\xa9part.htm'
    >>>

    """
    warnings.warn("w3lib.url.urljoin_rfc is deprecated, use urlparse.urljoin instead",
                  DeprecationWarning)
    str_base = to_bytes(base, encoding)
    str_ref = to_bytes(ref, encoding)
    return urljoin(str_base, str_ref)