You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

999 lines
33 KiB

4 years ago
  1. import asyncio
  2. import functools
  3. import random
  4. import sys
  5. import traceback
  6. import warnings
  7. from collections import defaultdict, deque
  8. from contextlib import suppress
  9. from http.cookies import SimpleCookie
  10. from itertools import cycle, islice
  11. from time import monotonic
  12. import attr
  13. from . import hdrs, helpers
  14. from .client_exceptions import (ClientConnectionError,
  15. ClientConnectorCertificateError,
  16. ClientConnectorError, ClientConnectorSSLError,
  17. ClientHttpProxyError,
  18. ClientProxyConnectionError,
  19. ServerFingerprintMismatch, cert_errors,
  20. ssl_errors)
  21. from .client_proto import ResponseHandler
  22. from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params
  23. from .helpers import PY_36, CeilTimeout, is_ip_address, noop, sentinel
  24. from .locks import EventResultOrError
  25. from .resolver import DefaultResolver
  26. try:
  27. import ssl
  28. except ImportError: # pragma: no cover
  29. ssl = None # type: ignore
  30. __all__ = ('BaseConnector', 'TCPConnector', 'UnixConnector')
  31. class Connection:
  32. _source_traceback = None
  33. _transport = None
  34. def __init__(self, connector, key, protocol, loop):
  35. self._key = key
  36. self._connector = connector
  37. self._loop = loop
  38. self._protocol = protocol
  39. self._callbacks = []
  40. if loop.get_debug():
  41. self._source_traceback = traceback.extract_stack(sys._getframe(1))
  42. def __repr__(self):
  43. return 'Connection<{}>'.format(self._key)
  44. def __del__(self, _warnings=warnings):
  45. if self._protocol is not None:
  46. if PY_36:
  47. kwargs = {'source': self}
  48. else:
  49. kwargs = {}
  50. _warnings.warn('Unclosed connection {!r}'.format(self),
  51. ResourceWarning,
  52. **kwargs)
  53. if self._loop.is_closed():
  54. return
  55. self._connector._release(
  56. self._key, self._protocol, should_close=True)
  57. context = {'client_connection': self,
  58. 'message': 'Unclosed connection'}
  59. if self._source_traceback is not None:
  60. context['source_traceback'] = self._source_traceback
  61. self._loop.call_exception_handler(context)
  62. @property
  63. def loop(self):
  64. return self._loop
  65. @property
  66. def transport(self):
  67. return self._protocol.transport
  68. @property
  69. def protocol(self):
  70. return self._protocol
  71. @property
  72. def writer(self):
  73. return self._protocol.writer
  74. def add_callback(self, callback):
  75. if callback is not None:
  76. self._callbacks.append(callback)
  77. def _notify_release(self):
  78. callbacks, self._callbacks = self._callbacks[:], []
  79. for cb in callbacks:
  80. with suppress(Exception):
  81. cb()
  82. def close(self):
  83. self._notify_release()
  84. if self._protocol is not None:
  85. self._connector._release(
  86. self._key, self._protocol, should_close=True)
  87. self._protocol = None
  88. def release(self):
  89. self._notify_release()
  90. if self._protocol is not None:
  91. self._connector._release(
  92. self._key, self._protocol,
  93. should_close=self._protocol.should_close)
  94. self._protocol = None
  95. def detach(self):
  96. self._notify_release()
  97. if self._protocol is not None:
  98. self._connector._release_acquired(self._protocol)
  99. self._protocol = None
  100. @property
  101. def closed(self):
  102. return self._protocol is None or not self._protocol.is_connected()
  103. class _TransportPlaceholder:
  104. """ placeholder for BaseConnector.connect function """
  105. def close(self):
  106. pass
class BaseConnector:
    """Base connector class.

    keepalive_timeout - (optional) Keep-alive timeout.
    force_close - Set to True to force close and do reconnect
        after each request (and between redirects).
    limit - The total number of simultaneous connections.
    limit_per_host - Number of simultaneous connections to one host.
    enable_cleanup_closed - Enables clean-up closed ssl transports.
                            Disabled by default.
    loop - Optional event loop.
    """

    _closed = True  # prevent AttributeError in __del__ if ctor was failed
    _source_traceback = None

    # abort transport after 2 seconds (cleanup broken connections)
    _cleanup_closed_period = 2.0

    def __init__(self, *, keepalive_timeout=sentinel,
                 force_close=False, limit=100, limit_per_host=0,
                 enable_cleanup_closed=False, loop=None):

        # keepalive and force_close are mutually exclusive options
        if force_close:
            if keepalive_timeout is not None and \
                    keepalive_timeout is not sentinel:
                raise ValueError('keepalive_timeout cannot '
                                 'be set if force_close is True')
        else:
            if keepalive_timeout is sentinel:
                keepalive_timeout = 15.0

        if loop is None:
            loop = asyncio.get_event_loop()

        self._closed = False
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))

        # idle pooled connections: {key: [(proto, use_time), ...]}
        self._conns = {}
        self._limit = limit
        self._limit_per_host = limit_per_host
        self._acquired = set()
        self._acquired_per_host = defaultdict(set)
        self._keepalive_timeout = keepalive_timeout
        self._force_close = force_close

        # {host_key: FIFO list of waiters}
        self._waiters = defaultdict(deque)

        self._loop = loop
        self._factory = functools.partial(ResponseHandler, loop=loop)

        self.cookies = SimpleCookie()

        # start keep-alive connection cleanup task
        self._cleanup_handle = None

        # start cleanup closed transports task
        self._cleanup_closed_handle = None
        self._cleanup_closed_disabled = not enable_cleanup_closed
        self._cleanup_closed_transports = []
        self._cleanup_closed()

    def __del__(self, _warnings=warnings):
        # Warn about (and close) a connector that was garbage collected
        # while still holding pooled connections.
        if self._closed:
            return
        if not self._conns:
            return

        conns = [repr(c) for c in self._conns.values()]

        self.close()

        if PY_36:
            kwargs = {'source': self}
        else:
            kwargs = {}
        _warnings.warn("Unclosed connector {!r}".format(self),
                       ResourceWarning,
                       **kwargs)
        context = {'connector': self,
                   'connections': conns,
                   'message': 'Unclosed connector'}
        if self._source_traceback is not None:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.close()

    @property
    def force_close(self):
        """Ultimately close connection on releasing if True."""
        return self._force_close

    @property
    def limit(self):
        """The total number for simultaneous connections.

        If limit is 0 the connector has no limit.
        The default limit size is 100.
        """
        return self._limit

    @property
    def limit_per_host(self):
        """The limit_per_host for simultaneous connections
        to the same endpoint.

        Endpoints are the same if they have an equal
        (host, port, is_ssl) triple.
        """
        return self._limit_per_host

    def _cleanup(self):
        """Cleanup unused transports."""
        if self._cleanup_handle:
            self._cleanup_handle.cancel()

        now = self._loop.time()
        timeout = self._keepalive_timeout

        if self._conns:
            connections = {}
            deadline = now - timeout
            for key, conns in self._conns.items():
                alive = []
                for proto, use_time in conns:
                    if proto.is_connected():
                        if use_time - deadline < 0:
                            # idle longer than keepalive_timeout: close
                            # instead of keeping it pooled
                            transport = proto.close()
                            if (key.is_ssl and
                                    not self._cleanup_closed_disabled):
                                self._cleanup_closed_transports.append(
                                    transport)
                        else:
                            alive.append((proto, use_time))
                if alive:
                    connections[key] = alive

            self._conns = connections

        if self._conns:
            # reschedule while idle connections remain
            self._cleanup_handle = helpers.weakref_handle(
                self, '_cleanup', timeout, self._loop)

    def _drop_acquired_per_host(self, key, val):
        # Remove one acquired entry; drop the key once its set is empty.
        acquired_per_host = self._acquired_per_host
        if key not in acquired_per_host:
            return
        conns = acquired_per_host[key]
        conns.remove(val)
        if not conns:
            del self._acquired_per_host[key]

    def _cleanup_closed(self):
        """Double confirmation for transport close.

        Some broken ssl servers may leave socket open without proper close.
        """
        if self._cleanup_closed_handle:
            self._cleanup_closed_handle.cancel()

        for transport in self._cleanup_closed_transports:
            if transport is not None:
                transport.abort()

        self._cleanup_closed_transports = []

        if not self._cleanup_closed_disabled:
            self._cleanup_closed_handle = helpers.weakref_handle(
                self, '_cleanup_closed',
                self._cleanup_closed_period, self._loop)

    def close(self):
        """Close all opened transports."""
        if self._closed:
            return

        self._closed = True

        try:
            if self._loop.is_closed():
                return noop()

            # cancel cleanup task
            if self._cleanup_handle:
                self._cleanup_handle.cancel()

            # cancel cleanup close task
            if self._cleanup_closed_handle:
                self._cleanup_closed_handle.cancel()

            for data in self._conns.values():
                for proto, t0 in data:
                    proto.close()

            for proto in self._acquired:
                proto.close()

            for transport in self._cleanup_closed_transports:
                if transport is not None:
                    transport.abort()

        finally:
            # always reset internal state, even if a close() above raised
            self._conns.clear()
            self._acquired.clear()
            self._waiters.clear()
            self._cleanup_handle = None
            self._cleanup_closed_transports.clear()
            self._cleanup_closed_handle = None

    @property
    def closed(self):
        """Is connector closed.

        A readonly property.
        """
        return self._closed

    def _available_connections(self, key):
        """Return the number of available connections for *key*,
        taking into account the total limit and limit_per_host.

        A value below 1 means no connection is currently available.
        """
        if self._limit:
            # total calc available connections
            available = self._limit - len(self._acquired)

            # check limit per host
            if (self._limit_per_host and available > 0 and
                    key in self._acquired_per_host):
                available = self._limit_per_host - len(
                    self._acquired_per_host.get(key))

        elif self._limit_per_host and key in self._acquired_per_host:
            # check limit per host
            available = self._limit_per_host - len(
                self._acquired_per_host.get(key))
        else:
            available = 1

        return available

    async def connect(self, req, traces, timeout):
        """Get from pool or create new connection."""
        key = req.connection_key
        available = self._available_connections(key)

        # Wait if there are no available connections.
        if available <= 0:
            fut = self._loop.create_future()

            # This connection will now count towards the limit.
            waiters = self._waiters[key]
            waiters.append(fut)

            if traces:
                for trace in traces:
                    await trace.send_connection_queued_start()

            try:
                await fut
            except BaseException as e:
                # remove a waiter even if it was cancelled, normally it's
                # removed when it's notified
                try:
                    waiters.remove(fut)
                except ValueError:  # fut may no longer be in list
                    pass

                raise e
            finally:
                if not waiters:
                    try:
                        del self._waiters[key]
                    except KeyError:
                        # the key was evicted before.
                        pass

            if traces:
                for trace in traces:
                    await trace.send_connection_queued_end()

        proto = self._get(key)
        if proto is None:
            # no reusable connection: hold the slot with a placeholder
            # while the real connection is being created
            placeholder = _TransportPlaceholder()
            self._acquired.add(placeholder)
            self._acquired_per_host[key].add(placeholder)

            if traces:
                for trace in traces:
                    await trace.send_connection_create_start()

            try:
                proto = await self._create_connection(req, traces, timeout)
                if self._closed:
                    proto.close()
                    raise ClientConnectionError("Connector is closed.")
            except BaseException:
                if not self._closed:
                    self._acquired.remove(placeholder)
                    self._drop_acquired_per_host(key, placeholder)
                    # the slot freed up: let a waiter try again
                    self._release_waiter()
                raise
            else:
                if not self._closed:
                    self._acquired.remove(placeholder)
                    self._drop_acquired_per_host(key, placeholder)

            if traces:
                for trace in traces:
                    await trace.send_connection_create_end()
        else:
            if traces:
                for trace in traces:
                    await trace.send_connection_reuseconn()

        self._acquired.add(proto)
        self._acquired_per_host[key].add(proto)
        return Connection(self, key, proto, self._loop)

    def _get(self, key):
        """Pop a still-usable pooled connection for *key*, or None."""
        try:
            conns = self._conns[key]
        except KeyError:
            return None

        t1 = self._loop.time()
        while conns:
            proto, t0 = conns.pop()
            if proto.is_connected():
                if t1 - t0 > self._keepalive_timeout:
                    # stale: close and keep scanning
                    transport = proto.close()
                    # only for SSL transports
                    if key.is_ssl and not self._cleanup_closed_disabled:
                        self._cleanup_closed_transports.append(transport)
                else:
                    if not conns:
                        # The very last connection was reclaimed: drop the key
                        del self._conns[key]
                    return proto

        # No more connections: drop the key
        del self._conns[key]
        return None

    def _release_waiter(self):
        """
        Iterate over all waiters until one is found that is not finished
        and belongs to a host that has available connections.
        """
        if not self._waiters:
            return

        # Shuffle the keys so we do not iterate in the same order on
        # each call.
        queues = list(self._waiters.keys())
        random.shuffle(queues)

        for key in queues:
            if self._available_connections(key) < 1:
                continue

            waiters = self._waiters[key]
            while waiters:
                waiter = waiters.popleft()
                if not waiter.done():
                    waiter.set_result(None)
                    return

    def _release_acquired(self, key, proto):
        if self._closed:
            # acquired connection is already released on connector closing
            return

        try:
            self._acquired.remove(proto)
            self._drop_acquired_per_host(key, proto)
        except KeyError:  # pragma: no cover
            # this may be a result of non-deterministic order of object
            # finalization due to garbage collection.
            pass
        else:
            self._release_waiter()

    def _release(self, key, protocol, *, should_close=False):
        if self._closed:
            # acquired connection is already released on connector closing
            return

        self._release_acquired(key, protocol)

        if self._force_close:
            should_close = True

        if should_close or protocol.should_close:
            transport = protocol.close()

            if key.is_ssl and not self._cleanup_closed_disabled:
                self._cleanup_closed_transports.append(transport)
        else:
            # return the connection to the idle pool and make sure the
            # keep-alive cleanup timer is running
            conns = self._conns.get(key)
            if conns is None:
                conns = self._conns[key] = []
            conns.append((protocol, self._loop.time()))

            if self._cleanup_handle is None:
                self._cleanup_handle = helpers.weakref_handle(
                    self, '_cleanup', self._keepalive_timeout, self._loop)

    async def _create_connection(self, req, traces, timeout):
        """Create a new connection; implemented by subclasses."""
        raise NotImplementedError()
  448. class _DNSCacheTable:
  449. def __init__(self, ttl=None):
  450. self._addrs_rr = {}
  451. self._timestamps = {}
  452. self._ttl = ttl
  453. def __contains__(self, host):
  454. return host in self._addrs_rr
  455. def add(self, host, addrs):
  456. self._addrs_rr[host] = (cycle(addrs), len(addrs))
  457. if self._ttl:
  458. self._timestamps[host] = monotonic()
  459. def remove(self, host):
  460. self._addrs_rr.pop(host, None)
  461. if self._ttl:
  462. self._timestamps.pop(host, None)
  463. def clear(self):
  464. self._addrs_rr.clear()
  465. self._timestamps.clear()
  466. def next_addrs(self, host):
  467. loop, length = self._addrs_rr[host]
  468. addrs = list(islice(loop, length))
  469. # Consume one more element to shift internal state of `cycle`
  470. next(loop)
  471. return addrs
  472. def expired(self, host):
  473. if self._ttl is None:
  474. return False
  475. return self._timestamps[host] + self._ttl < monotonic()
class TCPConnector(BaseConnector):
    """TCP connector.

    verify_ssl - Set to True to check ssl certifications.
    fingerprint - Pass the binary sha256
        digest of the expected certificate in DER format to verify
        that the certificate the server presents matches. See also
        https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
    resolver - Enable DNS lookups and use this
        resolver
    use_dns_cache - Use memory cache for DNS lookups.
    ttl_dns_cache - Max seconds having cached a DNS entry, None forever.
    family - socket address family
    local_addr - local tuple of (host, port) to bind socket to
    keepalive_timeout - (optional) Keep-alive timeout.
    force_close - Set to True to force close and do reconnect
        after each request (and between redirects).
    limit - The total number of simultaneous connections.
    limit_per_host - Number of simultaneous connections to one host.
    enable_cleanup_closed - Enables clean-up closed ssl transports.
                            Disabled by default.
    loop - Optional event loop.
    """

    def __init__(self, *, verify_ssl=True, fingerprint=None,
                 use_dns_cache=True, ttl_dns_cache=10,
                 family=0, ssl_context=None, ssl=None, local_addr=None,
                 resolver=None, keepalive_timeout=sentinel,
                 force_close=False, limit=100, limit_per_host=0,
                 enable_cleanup_closed=False, loop=None):
        super().__init__(keepalive_timeout=keepalive_timeout,
                         force_close=force_close,
                         limit=limit, limit_per_host=limit_per_host,
                         enable_cleanup_closed=enable_cleanup_closed,
                         loop=loop)

        # collapse the legacy ssl-related arguments into one value
        self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context,
                                      fingerprint)
        if resolver is None:
            resolver = DefaultResolver(loop=self._loop)
        self._resolver = resolver

        self._use_dns_cache = use_dns_cache
        self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache)
        # in-flight DNS lookups, keyed by (host, port)
        self._throttle_dns_events = {}
        self._family = family
        self._local_addr = local_addr

    def close(self):
        """Close all ongoing DNS calls."""
        for ev in self._throttle_dns_events.values():
            ev.cancel()

        super().close()

    @property
    def family(self):
        """Socket family like AF_INET."""
        return self._family

    @property
    def use_dns_cache(self):
        """True if local DNS caching is enabled."""
        return self._use_dns_cache

    def clear_dns_cache(self, host=None, port=None):
        """Remove specified host/port or clear all dns local cache."""
        if host is not None and port is not None:
            self._cached_hosts.remove((host, port))
        elif host is not None or port is not None:
            raise ValueError("either both host and port "
                             "or none of them are allowed")
        else:
            self._cached_hosts.clear()

    async def _resolve_host(self, host, port, traces=None):
        """Resolve *host*/*port* to a list of addr-info dicts, using the
        DNS cache and throttling concurrent lookups for the same key."""
        if is_ip_address(host):
            # literal IP: no lookup needed
            return [{'hostname': host, 'host': host, 'port': port,
                     'family': self._family, 'proto': 0, 'flags': 0}]

        if not self._use_dns_cache:

            if traces:
                for trace in traces:
                    await trace.send_dns_resolvehost_start(host)

            res = (await self._resolver.resolve(
                host, port, family=self._family))

            if traces:
                for trace in traces:
                    await trace.send_dns_resolvehost_end(host)

            return res

        key = (host, port)

        if (key in self._cached_hosts) and \
                (not self._cached_hosts.expired(key)):

            if traces:
                for trace in traces:
                    await trace.send_dns_cache_hit(host)

            return self._cached_hosts.next_addrs(key)

        if key in self._throttle_dns_events:
            # a lookup for this key is already running; wait for it
            if traces:
                for trace in traces:
                    await trace.send_dns_cache_hit(host)
            await self._throttle_dns_events[key].wait()
        else:
            if traces:
                for trace in traces:
                    await trace.send_dns_cache_miss(host)
            self._throttle_dns_events[key] = \
                EventResultOrError(self._loop)
            try:

                if traces:
                    for trace in traces:
                        await trace.send_dns_resolvehost_start(host)

                addrs = await \
                    self._resolver.resolve(host, port, family=self._family)
                if traces:
                    for trace in traces:
                        await trace.send_dns_resolvehost_end(host)

                self._cached_hosts.add(key, addrs)
                self._throttle_dns_events[key].set()
            except BaseException as e:
                # any DNS exception, independently of the implementation
                # is set for the waiters to raise the same exception.
                self._throttle_dns_events[key].set(exc=e)
                raise
            finally:
                self._throttle_dns_events.pop(key)

        return self._cached_hosts.next_addrs(key)

    async def _create_connection(self, req, traces, timeout):
        """Create connection.

        Has same keyword arguments as BaseEventLoop.create_connection.
        """
        if req.proxy:
            _, proto = await self._create_proxy_connection(
                req, traces, timeout)
        else:
            _, proto = await self._create_direct_connection(
                req, traces, timeout)

        return proto

    @staticmethod
    @functools.lru_cache(None)
    def _make_ssl_context(verified):
        # cached: at most two contexts are ever built (verified / not)
        if verified:
            return ssl.create_default_context()
        else:
            sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            sslcontext.options |= ssl.OP_NO_SSLv2
            sslcontext.options |= ssl.OP_NO_SSLv3
            sslcontext.options |= ssl.OP_NO_COMPRESSION
            sslcontext.set_default_verify_paths()
            return sslcontext

    def _get_ssl_context(self, req):
        """Logic to get the correct SSL context

        0. if req.ssl is false, return None

        1. if ssl_context is specified in req, use it
        2. if _ssl_context is specified in self, use it
        3. otherwise:
            1. if verify_ssl is not specified in req, use self.ssl_context
               (will generate a default context according to self.verify_ssl)
            2. if verify_ssl is True in req, generate a default SSL context
            3. if verify_ssl is False in req, generate a SSL context that
               won't verify
        """
        if req.is_ssl():
            if ssl is None:  # pragma: no cover
                raise RuntimeError('SSL is not supported.')
            sslcontext = req.ssl
            if isinstance(sslcontext, ssl.SSLContext):
                return sslcontext
            if sslcontext is not None:
                # not verified or fingerprinted
                return self._make_ssl_context(False)
            sslcontext = self._ssl
            if isinstance(sslcontext, ssl.SSLContext):
                return sslcontext
            if sslcontext is not None:
                # not verified or fingerprinted
                return self._make_ssl_context(False)
            return self._make_ssl_context(True)
        else:
            return None

    def _get_fingerprint(self, req):
        # per-request fingerprint wins over the connector-wide one
        ret = req.ssl
        if isinstance(ret, Fingerprint):
            return ret
        ret = self._ssl
        if isinstance(ret, Fingerprint):
            return ret
        return None

    async def _wrap_create_connection(self, *args,
                                      req, timeout,
                                      client_error=ClientConnectorError,
                                      **kwargs):
        # wrap loop.create_connection, translating low-level errors into
        # client_error / SSL-specific client exceptions
        try:
            with CeilTimeout(timeout.sock_connect):
                return await self._loop.create_connection(*args, **kwargs)
        except cert_errors as exc:
            raise ClientConnectorCertificateError(
                req.connection_key, exc) from exc
        except ssl_errors as exc:
            raise ClientConnectorSSLError(req.connection_key, exc) from exc
        except OSError as exc:
            raise client_error(req.connection_key, exc) from exc

    async def _create_direct_connection(self, req, traces, timeout,
                                        *, client_error=ClientConnectorError):
        sslcontext = self._get_ssl_context(req)
        fingerprint = self._get_fingerprint(req)

        try:
            # Cancelling this lookup should not cancel the underlying lookup
            # or else the cancel event will get broadcast to all the waiters
            # across all connections.
            hosts = await asyncio.shield(self._resolve_host(
                req.url.raw_host,
                req.port,
                traces=traces), loop=self._loop)
        except OSError as exc:
            # in case of proxy it is not ClientProxyConnectionError
            # it is problem of resolving proxy ip itself
            raise ClientConnectorError(req.connection_key, exc) from exc

        last_exc = None

        # try each resolved address in turn; keep the last failure so it
        # can be re-raised if all of them fail
        for hinfo in hosts:
            host = hinfo['host']
            port = hinfo['port']

            try:
                transp, proto = await self._wrap_create_connection(
                    self._factory, host, port, timeout=timeout,
                    ssl=sslcontext, family=hinfo['family'],
                    proto=hinfo['proto'], flags=hinfo['flags'],
                    server_hostname=hinfo['hostname'] if sslcontext else None,
                    local_addr=self._local_addr,
                    req=req, client_error=client_error)
            except ClientConnectorError as exc:
                last_exc = exc
                continue

            if req.is_ssl() and fingerprint:
                try:
                    fingerprint.check(transp)
                except ServerFingerprintMismatch as exc:
                    transp.close()
                    if not self._cleanup_closed_disabled:
                        self._cleanup_closed_transports.append(transp)
                    last_exc = exc
                    continue

            return transp, proto
        else:
            raise last_exc

    async def _create_proxy_connection(self, req, traces, timeout):
        headers = {}
        if req.proxy_headers is not None:
            headers = req.proxy_headers
        headers[hdrs.HOST] = req.headers[hdrs.HOST]

        proxy_req = ClientRequest(
            hdrs.METH_GET, req.proxy,
            headers=headers,
            auth=req.proxy_auth,
            loop=self._loop,
            ssl=req.ssl)

        # create connection to proxy server
        transport, proto = await self._create_direct_connection(
            proxy_req, [], timeout, client_error=ClientProxyConnectionError)

        # Many HTTP proxies have buggy keepalive support.  Let's not
        # reuse connection but close it after processing every
        # response.
        proto.force_close()

        auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None)
        if auth is not None:
            if not req.is_ssl():
                req.headers[hdrs.PROXY_AUTHORIZATION] = auth
            else:
                proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth

        if req.is_ssl():
            sslcontext = self._get_ssl_context(req)
            # For HTTPS requests over HTTP proxy
            # we must notify proxy to tunnel connection
            # so we send CONNECT command:
            #   CONNECT www.python.org:443 HTTP/1.1
            #   Host: www.python.org
            #
            # next we must do TLS handshake and so on
            # to do this we must wrap raw socket into secure one
            # asyncio handles this perfectly
            proxy_req.method = hdrs.METH_CONNECT
            proxy_req.url = req.url
            key = attr.evolve(req.connection_key,
                              proxy=None,
                              proxy_auth=None,
                              proxy_headers_hash=None)
            conn = Connection(self, key, proto, self._loop)
            proxy_resp = await proxy_req.send(conn)
            try:
                conn._protocol.set_response_params()
                resp = await proxy_resp.start(conn)
            except BaseException:
                proxy_resp.close()
                conn.close()
                raise
            else:
                # detach the protocol/transport from the temporary conn so
                # closing it does not tear down the tunnel socket
                conn._protocol = None
                conn._transport = None
                try:
                    if resp.status != 200:
                        raise ClientHttpProxyError(
                            proxy_resp.request_info,
                            resp.history,
                            status=resp.status,
                            message=resp.reason,
                            headers=resp.headers)
                    rawsock = transport.get_extra_info('socket', default=None)
                    if rawsock is None:
                        raise RuntimeError(
                            "Transport does not expose socket instance")
                    # Duplicate the socket, so now we can close proxy transport
                    rawsock = rawsock.dup()
                finally:
                    transport.close()

                # wrap the duplicated raw socket into a TLS connection to
                # the target host
                transport, proto = await self._wrap_create_connection(
                    self._factory, timeout=timeout,
                    ssl=sslcontext, sock=rawsock,
                    server_hostname=req.host,
                    req=req)
            finally:
                proxy_resp.close()

        return transport, proto
  787. class UnixConnector(BaseConnector):
  788. """Unix socket connector.
  789. path - Unix socket path.
  790. keepalive_timeout - (optional) Keep-alive timeout.
  791. force_close - Set to True to force close and do reconnect
  792. after each request (and between redirects).
  793. limit - The total number of simultaneous connections.
  794. limit_per_host - Number of simultaneous connections to one host.
  795. loop - Optional event loop.
  796. """
  797. def __init__(self, path, force_close=False, keepalive_timeout=sentinel,
  798. limit=100, limit_per_host=0, loop=None):
  799. super().__init__(force_close=force_close,
  800. keepalive_timeout=keepalive_timeout,
  801. limit=limit, limit_per_host=limit_per_host, loop=loop)
  802. self._path = path
  803. @property
  804. def path(self):
  805. """Path to unix socket."""
  806. return self._path
  807. async def _create_connection(self, req, traces, timeout):
  808. try:
  809. with CeilTimeout(timeout.sock_connect):
  810. _, proto = await self._loop.create_unix_connection(
  811. self._factory, self._path)
  812. except OSError as exc:
  813. raise ClientConnectorError(req.connection_key, exc) from exc
  814. return proto