Skip to content

Commit

Permalink
Merge pull request #42 from oracle/release_2018-01-11
Browse files Browse the repository at this point in the history
Add pagination module files to 1.3.12 release
  • Loading branch information
nathan-vu authored Jan 11, 2018
2 parents 090bcd5 + e4a9659 commit 2b5bbe3
Show file tree
Hide file tree
Showing 5 changed files with 555 additions and 0 deletions.
6 changes: 6 additions & 0 deletions src/oci/pagination/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.

from .pagination_utils import list_call_get_all_results, list_call_get_up_to_limit, list_call_get_all_results_generator, list_call_get_up_to_limit_generator

__all__ = ["list_call_get_all_results", "list_call_get_up_to_limit", "list_call_get_all_results_generator", "list_call_get_up_to_limit_generator"]
6 changes: 6 additions & 0 deletions src/oci/pagination/internal/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.

from . import retry

__all__ = ["retry"]
199 changes: 199 additions & 0 deletions src/oci/pagination/internal/retry.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,199 @@
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.

from . import retry_checkers

import random
import time


class RetryStrategyBuilder(object):
    """
    A class which can build a retry strategy based on provided criteria. Criteria can be provided at construction time or
    afterwards via using the add_* (to add/enable criteria) and no_* (to disable/remove criteria) methods.

    When calculating the delay between retries, we use exponential backoff with full jitter as the default strategy
    vended by this builder.
    """
    # The only backoff type currently supported (see __init__'s backoff_type parameter)
    BACKOFF_FULL_JITTER = 'full_jitter'

    def __init__(self, **kwargs):
        """
        Creates a new builder and initializes it based on any provided parameters.

        :param Boolean max_attempts_check (optional):
            Whether to enable a check that we don't exceed a certain number of attempts. If not provided
            this defaults to False (i.e. this check will not be done)

        :param Boolean service_error_check (optional):
            Whether to enable a check that will retry on connection errors, timeouts and service errors
            which match given combinations of HTTP statuses and textual error codes. If not provided
            this defaults to False (i.e. this check will not be done)

        :param int max_attempts (optional):
            If we are checking that we don't exceed a certain number of attempts, what that number of
            attempts should be. This only applies if we are performing a check on the maximum number of
            attempts and will be ignored otherwise. If we are performing a check on the maximum number of
            attempts and this value is not provided, we will default to a maximum of 5 attempts

        :param dict service_error_retry_config (optional):
            If we are checking on service errors, we can configure what HTTP statuses (e.g. 429) to retry on and, optionally,
            whether the textual code (e.g. TooManyRequests) matches a given value.

            This is a dictionary where the key is an integer representing the HTTP status, and the value is a list(str) where we
            will test if the textual code in the service error is a member of the list. If an empty list is provided, then only
            the numeric status is checked for retry purposes.

            If we are performing a check on service errors and this value is not provided, then by default we will retry on
            HTTP 429's (throttles) without any textual code check.

        :param Boolean service_error_retry_on_any_5xx (optional):
            If we are checking on service errors, whether to retry on any HTTP 5xx received from the service. If
            we are performing a check on service errors and this value is not provided, it defaults to True (retry on any 5xx)

        :param int retry_base_sleep_time_millis (optional):
            For exponential backoff with jitter, the base time to use in our retry calculation in milliseconds. If not
            provided, this value defaults to 1000ms (i.e. 1 second)

        :param int retry_exponential_growth_factor (optional):
            For exponential backoff with jitter, the exponent which we will raise to the power of the number of attempts. If
            not provided, this value defaults to 2

        :param int retry_max_wait_time_millis (optional):
            For exponential backoff with jitter, the maximum amount of time to wait between retries. If not provided, this
            value defaults to 8000ms (i.e. 8 seconds)

        :param str backoff_type (optional):
            The type of backoff we want to do (e.g. full jitter). Currently the only supported value is 'full_jitter' (the convenience
            constant BACKOFF_FULL_JITTER in this class can also be used)

        :raises ValueError: if a backoff_type other than 'full_jitter' is provided
        """

        self.max_attempts_check = kwargs.get('max_attempts_check', False)
        self.service_error_check = kwargs.get('service_error_check', False)

        # None means "use the checker's own default" when the strategy is built
        self.max_attempts = kwargs.get('max_attempts', None)
        self.service_error_retry_config = kwargs.get('service_error_retry_config', {})
        self.service_error_retry_on_any_5xx = kwargs.get('service_error_retry_on_any_5xx', True)

        self.retry_base_sleep_time_millis = kwargs.get('retry_base_sleep_time_millis', 1000)
        self.retry_exponential_growth_factor = kwargs.get('retry_exponential_growth_factor', 2)
        self.retry_max_wait_time_millis = kwargs.get('retry_max_wait_time_millis', 8000)

        if 'backoff_type' in kwargs and kwargs['backoff_type'] != self.BACKOFF_FULL_JITTER:
            raise ValueError('Currently full_jitter is the only supported backoff type')

    def add_max_attempts(self, max_attempts=None):
        """Enables the max-attempts check, optionally setting the limit. Returns self for chaining."""
        self.max_attempts_check = True
        if max_attempts:
            self.max_attempts = max_attempts
        return self

    def no_max_attempts(self):
        """Disables the max-attempts check. Returns self for chaining."""
        self.max_attempts_check = False
        return self

    # Backwards-compatible alias for the previously misspelled method name
    no_max_attemps = no_max_attempts

    def add_service_error_check(self, **kwargs):
        """
        Enables the service-error check. Accepts either a full service_error_retry_config dict,
        or a single (service_error_status, service_error_codes) pair to add to the existing config.
        Returns self for chaining.
        """
        self.service_error_check = True

        if 'service_error_retry_config' in kwargs:
            self.service_error_retry_config = kwargs['service_error_retry_config']
        elif 'service_error_status' in kwargs and 'service_error_codes' in kwargs:
            self.service_error_retry_config[kwargs['service_error_status']] = kwargs['service_error_codes']

        if 'service_error_retry_on_any_5xx' in kwargs:
            self.service_error_retry_on_any_5xx = kwargs['service_error_retry_on_any_5xx']

        return self

    def no_service_error_check(self):
        """Disables the service-error check. Returns self for chaining."""
        self.service_error_check = False
        return self

    def get_retry_strategy(self):
        """
        Builds and returns an ExponentialBackoffWithFullJitterRetryStrategy configured with
        one checker per enabled criterion.
        """
        checkers = []

        if self.max_attempts_check:
            if self.max_attempts:
                checkers.append(retry_checkers.LimitBasedRetryChecker(max_attempts=self.max_attempts))
            else:
                checkers.append(retry_checkers.LimitBasedRetryChecker())

        if self.service_error_check:
            if self.service_error_retry_config:
                checkers.append(
                    retry_checkers.TimeoutConnectionAndServiceErrorRetryChecker(
                        service_error_retry_config=self.service_error_retry_config,
                        retry_any_5xx=self.service_error_retry_on_any_5xx
                    )
                )
            else:
                checkers.append(retry_checkers.TimeoutConnectionAndServiceErrorRetryChecker(retry_any_5xx=self.service_error_retry_on_any_5xx))

        checker_container = retry_checkers.RetryCheckerContainer(checkers=checkers)

        return ExponentialBackoffWithFullJitterRetryStrategy(
            base_sleep_time_millis=self.retry_base_sleep_time_millis,
            exponent_growth_factor=self.retry_exponential_growth_factor,
            max_wait_millis=self.retry_max_wait_time_millis,
            checker_container=checker_container
        )


class ExponentialBackoffWithFullJitterRetryStrategy(object):
    """
    A retry strategy which does exponential backoff and full jitter. Times used are in milliseconds and
    the strategy can be described as:

    .. code-block:: none

        random(0, min(base_sleep_time_millis * exponent_growth_factor ** attempt, max_wait_millis))
    """

    def __init__(self, base_sleep_time_millis, exponent_growth_factor, max_wait_millis, checker_container, **kwargs):
        """
        Creates a new instance of an exponential backoff with full jitter retry strategy.

        :param int base_sleep_time_millis:
            The base amount to sleep by, in milliseconds

        :param int exponent_growth_factor:
            The exponent part of our backoff. We will raise this value to the power of the
            number of attempts and then multiply it with base_sleep_time_millis

        :param int max_wait_millis:
            The maximum time we will wait between calls, in milliseconds

        :param retry_checkers.RetryCheckerContainer checker_container:
            The checks to run to determine whether a failed call should be retried
        """
        self.base_sleep_time_millis = base_sleep_time_millis
        self.exponent_growth_factor = exponent_growth_factor
        self.max_wait_millis = max_wait_millis
        self.checkers = checker_container

    def make_retrying_call(self, func_ref, *func_args, **func_kwargs):
        """
        Calls the function given by func_ref. Any positional (*func_args) and keyword (**func_kwargs)
        arguments are passed as-is to func_ref.

        :param function func_ref:
            The function that we should call with retries

        :return: the result of calling func_ref
        :raises Exception: re-raises the last exception from func_ref once the checkers decline a retry
        """
        attempt = 0
        # Loop until the call succeeds (we return its result) or the checkers tell us
        # to stop retrying (we re-raise the exception).
        while True:
            try:
                return func_ref(*func_args, **func_kwargs)
            except Exception as e:
                attempt += 1
                if self.checkers.should_retry(exception=e, current_attempt=attempt):
                    self.do_sleep(attempt)
                else:
                    raise

    def do_sleep(self, attempt):
        """Sleeps for a full-jitter backoff interval based on the (one-based) attempt number."""
        # Full jitter: pick uniformly between 0 and the capped exponential ceiling
        sleep_time_millis = random.uniform(0, min(self.base_sleep_time_millis * (self.exponent_growth_factor ** attempt), self.max_wait_millis))
        time.sleep(sleep_time_millis / 1000.0)  # time.sleep needs seconds, but can take fractional seconds
150 changes: 150 additions & 0 deletions src/oci/pagination/internal/retry_checkers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
#
# Contains helper classes that can say whether a retry should occur based on various criteria, such as a maximum number of retries being
# hit or the exception received from a service call (or the response from the service call if it didn't exception out).

from ...exceptions import ServiceError
from requests.exceptions import Timeout
from requests.exceptions import ConnectionError


class RetryCheckerContainer(object):
    """
    A container holding one or more retry checkers, allowing different checkers to be chained
    into a single overall decision about whether a request should be retried.

    Checkers are consulted in the order they appear in the provided list; the first checker
    to report failure stops the evaluation and no more retries should happen.
    """

    def __init__(self, checkers, **kwargs):
        if not checkers:
            raise ValueError('At least one retry checker needs to be provided')
        self.checkers = checkers

    def add_checker(self, checker):
        """Appends another checker; it will be consulted after all existing checkers."""
        self.checkers.append(checker)

    def should_retry(self, exception=None, response=None, **kwargs):
        """
        Determines if a retry should be performed based on either an exception or a response. We will
        retry only if every checker held in this container agrees that a retry should happen; any
        single dissenting checker vetoes the retry.

        :param Exception exception:
            An exception received from the service

        :param Response response:
            The :class:`~oci.response.Response` received from a service call

        :return: True if we should retry, and False otherwise
        :rtype: Boolean
        """
        # all() short-circuits on the first checker that declines, matching the
        # "first failure wins" contract described in the class docstring.
        return all(checker.should_retry(exception, response, **kwargs) for checker in self.checkers)


class BaseRetryChecker(object):
    """
    Base class defining the contract all retry checkers must honour. It carries no behaviour
    of its own; concrete subclasses decide whether a retry is warranted.
    """

    def __init__(self, **kwargs):
        # Accepts (and ignores) arbitrary keyword arguments so subclasses can
        # forward their own **kwargs without filtering.
        pass

    def should_retry(self, exception=None, response=None, **kwargs):
        """
        Determines if a retry should be performed based on either an exception or a response.

        :param Exception exception:
            An exception received from the service

        :param Response response:
            The :class:`~oci.response.Response` received from a service call

        :return: True if we should retry, and False otherwise
        :rtype: Boolean

        :raises NotImplementedError: always, on this base class
        """
        raise NotImplementedError('Subclasses should implement this')


class LimitBasedRetryChecker(BaseRetryChecker):
    """
    A retry checker which can retry as long as some threshold (# of attempts/tries) has not been breached.
    It is the responsibility of the caller to track how many attempts/tries it has done - objects of this
    class will not track this.

    If not specified, the default number of tries allowed is 5. Tries are also assumed to be one-based (i.e. the
    first attempt/try is 1, the second is 2 etc)
    """

    def __init__(self, max_attempts=5, **kwargs):
        """
        :param int max_attempts:
            The maximum number of attempts to allow. Must be >= 1, with 1 indicating no retries.
            Defaults to 5.

        :raises ValueError: if max_attempts is less than 1
        """
        if max_attempts < 1:
            raise ValueError('The max number of attempts must be >= 1, with 1 indicating no retries')

        super(LimitBasedRetryChecker, self).__init__(**kwargs)
        self.max_attempts = max_attempts

    def should_retry(self, exception=None, response=None, **kwargs):
        # Retry while the (one-based) current_attempt from kwargs is still below the limit.
        # A missing current_attempt defaults to 0, which always permits a retry.
        return self.max_attempts > kwargs.get('current_attempt', 0)


class TimeoutConnectionAndServiceErrorRetryChecker(BaseRetryChecker):
    """
    A checker which will retry on certain exceptions. Retries are enabled for the following exception types:

        - Timeouts from the requests library (we will always retry on these)
        - ConnectionErrors from the requests library (we will always retry on these)
        - Service errors where the status is 500 or above (i.e. a server-side error)
        - Service errors where a status (e.g. 429) and, optionally, the code meet a given criteria

    The last item is configurable via dictionary where the key is some numeric status representing a HTTP status and the value
    is a list of strings with each string representing a textual error code (such as those error codes documented at
    https://docs.us-phoenix-1.oraclecloud.com/Content/API/References/apierrors.htm). If an empty list is provided, then
    only the numeric status is checked for retry purposes. For a populated array, we are looking for where the numeric status matches
    and the code from the exception appears in the array. As an example:

    .. code-block:: python

        {
            400: ['QuotaExceeded'],
            500: []
        }

    If no configuration is provided, then the default for service errors is to retry on HTTP 429's and 5xx's without any code checks. If a
    specific 5xx code (e.g. 500, 502) is provided in the dictionary then it takes precedence over the option to retry on any 500. For example
    it is possible to retry on only 502s (either by status or by status and matching some code ) by disabling the general "retry on any 5xx"
    configuration and placing an entry for 502 in the dictionary
    """

    # Default service-error retry configuration: retry on HTTP 429 without any code check.
    # NOTE(review): the -1 entry presumably matches service errors carrying no real HTTP
    # status - confirm against how ServiceError populates its status attribute.
    RETRYABLE_STATUSES_AND_CODES = {
        -1: [],
        429: []
    }

    def __init__(self, service_error_retry_config=None, retry_any_5xx=True, **kwargs):
        """
        :param dict service_error_retry_config (optional):
            Mapping of HTTP status (int) to a list of textual error codes to retry on (an empty
            list means any code for that status). Defaults to RETRYABLE_STATUSES_AND_CODES.

        :param Boolean retry_any_5xx (optional):
            Whether to retry on any HTTP 5xx not explicitly present in the config. Defaults to True.
        """
        super(TimeoutConnectionAndServiceErrorRetryChecker, self).__init__(**kwargs)
        self.retry_any_5xx = retry_any_5xx
        if service_error_retry_config is None:
            service_error_retry_config = self.RETRYABLE_STATUSES_AND_CODES
        # Copy so instance-level mutation cannot corrupt the caller's dict or the shared
        # class-level default (previously the class dict was used directly as a mutable
        # default argument, shared by every instance).
        self.service_error_retry_config = dict(service_error_retry_config)

    def should_retry(self, exception=None, response=None, **kwargs):
        if isinstance(exception, Timeout):
            return True
        elif isinstance(exception, ConnectionError):
            return True
        elif isinstance(exception, ServiceError):
            if exception.status in self.service_error_retry_config:
                codes = self.service_error_retry_config[exception.status]
                if not codes:
                    # An empty list means "retry on this status regardless of textual code"
                    return True
                else:
                    return exception.code in codes
            elif self.retry_any_5xx and exception.status >= 500:
                # Statuses explicitly present in the config take precedence; this branch
                # only handles 5xx statuses NOT listed in the config
                return True

        return False
Loading

0 comments on commit 2b5bbe3

Please sign in to comment.