# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading

from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers


logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10


def convert_to_response_dict(http_response, operation_model):
    """Convert an HTTP response object to a response dict.

    This converts the requests library's HTTP response object to
    a dictionary.

    :type http_response: botocore.vendored.requests.model.Response
    :param http_response: The HTTP response from an AWS service request.

    :rtype: dict
    :return: A response dictionary which will contain the following keys:
        * headers (dict)
        * status_code (int)
        * body (string or file-like object)

    """
    response_dict = {
        'headers': http_response.headers,
        'status_code': http_response.status_code,
        'context': {
            'operation_name': operation_model.name,
        }
    }
    if response_dict['status_code'] >= 300:
        response_dict['body'] = http_response.content
    elif operation_model.has_event_stream_output:
        response_dict['body'] = http_response.raw
    elif operation_model.has_streaming_output:
        length = response_dict['headers'].get('content-length')
        response_dict['body'] = StreamingBody(http_response.raw, length)
    else:
        response_dict['body'] = http_response.content
    return response_dict
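

# Illustrative only: for a typical non-streaming 200 response, the dict
# built above might look roughly like this (operation name, headers, and
# body are hypothetical values, not taken from this module):
#
#     {
#         'headers': {'content-type': 'application/x-amz-json-1.1', ...},
#         'status_code': 200,
#         'context': {'operation_name': 'DescribeRegions'},
#         'body': b'{"Regions": [...]}',
#     }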


class Endpoint(object):
    """
    Represents an endpoint for a particular service in a specific
    region. Only an endpoint can make requests.

    :ivar service: The Service object that describes this endpoint's
        service.
    :ivar host: The fully qualified endpoint hostname.
    :ivar session: The session object.
    """

    def __init__(self, host, endpoint_prefix, event_emitter,
                 response_parser_factory=None, http_session=None):
        self._endpoint_prefix = endpoint_prefix
        self._event_emitter = event_emitter
        self.host = host
        self._lock = threading.Lock()
        if response_parser_factory is None:
            response_parser_factory = parsers.ResponseParserFactory()
        self._response_parser_factory = response_parser_factory
        self.http_session = http_session
        if self.http_session is None:
            self.http_session = URLLib3Session()

    def __repr__(self):
        return '%s(%s)' % (self._endpoint_prefix, self.host)

    def make_request(self, operation_model, request_dict):
        logger.debug("Making request for %s with params: %s",
                     operation_model, request_dict)
        return self._send_request(request_dict, operation_model)

    def create_request(self, params, operation_model=None):
        request = create_request_object(params)
        if operation_model:
            request.stream_output = any([
                operation_model.has_streaming_output,
                operation_model.has_event_stream_output
            ])
            service_id = operation_model.service_model.service_id.hyphenize()
            event_name = 'request-created.{service_id}.{op_name}'.format(
                service_id=service_id,
                op_name=operation_model.name)
            self._event_emitter.emit(event_name, request=request,
                                     operation_name=operation_model.name)
        prepared_request = self.prepare_request(request)
        return prepared_request
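
    # Note: the 'request-created' event name embeds the hyphenized service
    # id and the operation name, e.g. 'request-created.s3.PutObject'
    # (illustrative names). Handlers registered on that event, such as
    # request signers, get a chance to modify the request before it is
    # prepared and sent.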

    def _encode_headers(self, headers):
        # In place encoding of headers to utf-8 if they are unicode.
        for key, value in headers.items():
            if isinstance(value, six.text_type):
                headers[key] = value.encode('utf-8')

    def prepare_request(self, request):
        self._encode_headers(request.headers)
        return request.prepare()

    def _send_request(self, request_dict, operation_model):
        attempts = 1
        request = self.create_request(request_dict, operation_model)
        context = request_dict['context']
        success_response, exception = self._get_response(
            request, operation_model, context)
        while self._needs_retry(attempts, operation_model, request_dict,
                                success_response, exception):
            attempts += 1
            # If there is a stream associated with the request, we need
            # to reset it before attempting to send the request again.
            # This will ensure that we resend the entire contents of the
            # body.
            request.reset_stream()
            # Create a new request when retried (including a new signature).
            request = self.create_request(
                request_dict, operation_model)
            success_response, exception = self._get_response(
                request, operation_model, context)
        if success_response is not None and \
                'ResponseMetadata' in success_response[1]:
            # We want to share num retries, not num attempts.
            total_retries = attempts - 1
            success_response[1]['ResponseMetadata']['RetryAttempts'] = \
                total_retries
        if exception is not None:
            raise exception
        else:
            return success_response
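
    # For reference: when retries occur, the retry count (attempts - 1) is
    # surfaced to callers under ResponseMetadata. For example (illustrative),
    # a request that succeeded on its third attempt would report
    # parsed['ResponseMetadata']['RetryAttempts'] == 2.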

    def _get_response(self, request, operation_model, context):
        # This will return a tuple of (success_response, exception)
        # and success_response is itself a tuple of
        # (http_response, parsed_dict).
        # If an exception occurs then the success_response is None.
        # If no exception occurs then exception is None.
        success_response, exception = self._do_get_response(
            request, operation_model)
        kwargs_to_emit = {
            'response_dict': None,
            'parsed_response': None,
            'context': context,
            'exception': exception,
        }
        if success_response is not None:
            http_response, parsed_response = success_response
            kwargs_to_emit['parsed_response'] = parsed_response
            kwargs_to_emit['response_dict'] = convert_to_response_dict(
                http_response, operation_model)
        service_id = operation_model.service_model.service_id.hyphenize()
        self._event_emitter.emit(
            'response-received.%s.%s' % (
                service_id, operation_model.name), **kwargs_to_emit)
        return success_response, exception

    def _do_get_response(self, request, operation_model):
        try:
            logger.debug("Sending http request: %s", request)
            history_recorder.record('HTTP_REQUEST', {
                'method': request.method,
                'headers': request.headers,
                'streaming': operation_model.has_streaming_input,
                'url': request.url,
                'body': request.body
            })
            service_id = operation_model.service_model.service_id.hyphenize()
            event_name = 'before-send.%s.%s' % (
                service_id, operation_model.name)
            responses = self._event_emitter.emit(event_name, request=request)
            http_response = first_non_none_response(responses)
            if http_response is None:
                http_response = self._send(request)
        except HTTPClientError as e:
            return (None, e)
        except Exception as e:
            logger.debug("Exception received when sending HTTP request.",
                         exc_info=True)
            return (None, e)
        # This returns the http_response and the parsed_data.
        response_dict = convert_to_response_dict(http_response,
                                                 operation_model)
        http_response_record_dict = response_dict.copy()
        http_response_record_dict['streaming'] = \
            operation_model.has_streaming_output
        history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
        protocol = operation_model.metadata['protocol']
        parser = self._response_parser_factory.create_parser(protocol)
        parsed_response = parser.parse(
            response_dict, operation_model.output_shape)
        # Do a second parsing pass to pick up on any modeled error fields
        # NOTE: Ideally, we would push this down into the parser classes but
        # they currently have no reference to the operation or service model
        # The parsers should probably take the operation model instead of
        # output shape but we can't change that now
        if http_response.status_code >= 300:
            self._add_modeled_error_fields(
                response_dict, parsed_response,
                operation_model, parser,
            )
        history_recorder.record('PARSED_RESPONSE', parsed_response)
        return (http_response, parsed_response), None

    def _add_modeled_error_fields(
            self, response_dict, parsed_response,
            operation_model, parser,
    ):
        error_code = parsed_response.get("Error", {}).get("Code")
        if error_code is None:
            return
        service_model = operation_model.service_model
        error_shape = service_model.shape_for_error_code(error_code)
        if error_shape is None:
            return
        modeled_parse = parser.parse(response_dict, error_shape)
        # TODO: avoid naming conflicts with ResponseMetadata and Error
        parsed_response.update(modeled_parse)

    def _needs_retry(self, attempts, operation_model, request_dict,
                     response=None, caught_exception=None):
        service_id = operation_model.service_model.service_id.hyphenize()
        event_name = 'needs-retry.%s.%s' % (
            service_id,
            operation_model.name)
        responses = self._event_emitter.emit(
            event_name, response=response, endpoint=self,
            operation=operation_model, attempts=attempts,
            caught_exception=caught_exception, request_dict=request_dict)
        handler_response = first_non_none_response(responses)
        if handler_response is None:
            return False
        else:
            # Request needs to be retried, and we need to sleep
            # for the specified number of seconds.
            logger.debug("Response received to retry, sleeping for "
                         "%s seconds", handler_response)
            time.sleep(handler_response)
            return True
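
    # A 'needs-retry' handler returns either None (do not retry) or the
    # number of seconds to sleep before the next attempt. For example, a
    # handler implementing exponential backoff might return 0.5, then 1.0,
    # then 2.0 on successive attempts (illustrative values, not taken from
    # this module).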

    def _send(self, request):
        return self.http_session.send(request)


class EndpointCreator(object):
    def __init__(self, event_emitter):
        self._event_emitter = event_emitter

    def create_endpoint(self, service_model, region_name, endpoint_url,
                        verify=None, response_parser_factory=None,
                        timeout=DEFAULT_TIMEOUT,
                        max_pool_connections=MAX_POOL_CONNECTIONS,
                        http_session_cls=URLLib3Session,
                        proxies=None,
                        socket_options=None,
                        client_cert=None,
                        proxies_config=None):
        if not is_valid_endpoint_url(endpoint_url):
            raise ValueError("Invalid endpoint: %s" % endpoint_url)
        if proxies is None:
            proxies = self._get_proxies(endpoint_url)
        endpoint_prefix = service_model.endpoint_prefix

        logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
        http_session = http_session_cls(
            timeout=timeout,
            proxies=proxies,
            verify=self._get_verify_value(verify),
            max_pool_connections=max_pool_connections,
            socket_options=socket_options,
            client_cert=client_cert,
            proxies_config=proxies_config
        )

        return Endpoint(
            endpoint_url,
            endpoint_prefix=endpoint_prefix,
            event_emitter=self._event_emitter,
            response_parser_factory=response_parser_factory,
            http_session=http_session
        )

    def _get_proxies(self, url):
        # We could also support getting proxies from a config file,
        # but for now proxy support is taken from the environment.
        return get_environ_proxies(url)

    def _get_verify_value(self, verify):
        # This is to account for:
        # https://github.com/kennethreitz/requests/issues/1436
        # where we need to honor REQUESTS_CA_BUNDLE because we're creating our
        # own request objects.
        # First, if verify is not None, then the user explicitly specified
        # a value so this automatically wins.
        if verify is not None:
            return verify
        # Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
        # True if the env var does not exist.
        return os.environ.get('REQUESTS_CA_BUNDLE', True)
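

# A minimal usage sketch (illustrative, not part of this module): wiring an
# EndpointCreator to an event emitter and building an Endpoint. Here,
# 'service_model' is assumed to be an already-loaded botocore ServiceModel
# and the endpoint URL is a hypothetical example.
#
#     from botocore.hooks import HierarchicalEmitter
#
#     emitter = HierarchicalEmitter()
#     creator = EndpointCreator(emitter)
#     endpoint = creator.create_endpoint(
#         service_model,
#         region_name='us-east-1',
#         endpoint_url='https://ec2.us-east-1.amazonaws.com',
#     )
#     # endpoint.make_request(operation_model, request_dict) would then send
#     # the request and return a (http_response, parsed_response) tuple.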