diff --git a/doc/tutorial/statistical_inference/unsupervised_learning_fixture.py b/doc/tutorial/statistical_inference/unsupervised_learning_fixture.py
index 13f4fc1399a90..6c8a6fef56ad6 100644
--- a/doc/tutorial/statistical_inference/unsupervised_learning_fixture.py
+++ b/doc/tutorial/statistical_inference/unsupervised_learning_fixture.py
@@ -1,10 +1,11 @@
-"""Fixture module to skip the unsupervised_learning.rst doctest for
-versions of SciPy earlier than 0.12.0.
+"""Fixture module to skip the unsupervised_learning.rst doctest for
+versions of SciPy earlier than 0.12.0.
 """
 from sklearn.utils.testing import SkipTest
 from sklearn.utils.fixes import sp_version
+from sklearn.utils import parse_version
 
 
 def setup_module(module):
-    if sp_version < (0, 12):
+    if sp_version < parse_version('0.12'):
         raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                        "thus does not include the scipy.misc.face() image.")
diff --git a/sklearn/cross_decomposition/pls_.py b/sklearn/cross_decomposition/pls_.py
index baf61a521edae..3915c027a327d 100644
--- a/sklearn/cross_decomposition/pls_.py
+++ b/sklearn/cross_decomposition/pls_.py
@@ -4,11 +4,10 @@
 # Author: Edouard Duchesnay
 # License: BSD 3 clause
 
-from distutils.version import LooseVersion
 from sklearn.utils.extmath import svd_flip
 
 from ..base import BaseEstimator, RegressorMixin, TransformerMixin
-from ..utils import check_array, check_consistent_length
+from ..utils import check_array, check_consistent_length, parse_version
 from ..externals import six
 
 import warnings
@@ -22,7 +21,7 @@
 import scipy
 pinv2_args = {}
-if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
+if parse_version(scipy.__version__) >= parse_version('0.12'):
     # check_finite=False is an optimization available only in scipy >=0.12
     pinv2_args = {'check_finite': False}
diff --git a/sklearn/feature_extraction/tests/test_image.py b/sklearn/feature_extraction/tests/test_image.py
index 6c57788efe904..d398d5c18dd52 100644
--- a/sklearn/feature_extraction/tests/test_image.py
+++ b/sklearn/feature_extraction/tests/test_image.py
@@ -14,8 +14,9 @@
 from sklearn.utils.graph import connected_components
 from sklearn.utils.testing import SkipTest, assert_equal, assert_true
 from sklearn.utils.fixes import sp_version
+from sklearn.utils.version import parse_version
 
-if sp_version < (0, 12):
+if sp_version < parse_version('0.12'):
     raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                    "thus does not include the scipy.misc.face() image.")
diff --git a/sklearn/linear_model/least_angle.py b/sklearn/linear_model/least_angle.py
index c06720a25299c..e35d96ff4cb05 100644
--- a/sklearn/linear_model/least_angle.py
+++ b/sklearn/linear_model/least_angle.py
@@ -13,7 +13,6 @@
 from math import log
 import sys
 import warnings
-from distutils.version import LooseVersion
 
 import numpy as np
 from scipy import linalg, interpolate
@@ -28,9 +27,11 @@
 from ..externals.six.moves import xrange
 from ..externals.six import string_types
 
+from ..utils import parse_version
+
 import scipy
 solve_triangular_args = {}
-if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
+if parse_version(scipy.__version__) >= parse_version('0.12'):
     solve_triangular_args = {'check_finite': False}
diff --git a/sklearn/linear_model/omp.py b/sklearn/linear_model/omp.py
index 5328a2ed81707..19ae6fade7bda 100644
--- a/sklearn/linear_model/omp.py
+++ b/sklearn/linear_model/omp.py
@@ -6,7 +6,6 @@
 # License: BSD 3 clause
 
 import warnings
-from distutils.version import LooseVersion
 
 import numpy as np
 from scipy import linalg
@@ -18,9 +17,11 @@
 from ..model_selection import check_cv
 from ..externals.joblib import Parallel, delayed
 
+from ..utils import parse_version
+
 import scipy
 solve_triangular_args = {}
-if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
+if parse_version(scipy.__version__) >= parse_version('0.12'):
     # check_finite=False is an optimization available only in scipy >=0.12
     solve_triangular_args = {'check_finite': False}
diff --git a/sklearn/linear_model/tests/test_logistic.py b/sklearn/linear_model/tests/test_logistic.py
index ec2be517bf382..157d5f141388b 100644
--- a/sklearn/linear_model/tests/test_logistic.py
+++ b/sklearn/linear_model/tests/test_logistic.py
@@ -16,6 +16,7 @@
 from sklearn.exceptions import ConvergenceWarning
 from sklearn.utils import compute_class_weight
 from sklearn.utils.fixes import sp_version
+from sklearn.utils.version import parse_version
 
 from sklearn.linear_model.logistic import (
     LogisticRegression,
@@ -899,7 +900,7 @@ def test_max_iter():
     solvers = ['newton-cg', 'liblinear', 'sag']
 
     # old scipy doesn't have maxiter
-    if sp_version >= (0, 12):
+    if sp_version >= parse_version('0.12'):
         solvers.append('lbfgs')
 
     for max_iter in range(1, 5):
@@ -969,7 +970,7 @@ def test_warm_start():
     solvers = ['newton-cg', 'sag']
 
     # old scipy doesn't have maxiter
-    if sp_version >= (0, 12):
+    if sp_version >= parse_version('0.12'):
         solvers.append('lbfgs')
 
     for warm_start in [True, False]:
diff --git a/sklearn/metrics/tests/test_classification.py b/sklearn/metrics/tests/test_classification.py
index e9616e933b70c..7e6ad39c6cd0c 100644
--- a/sklearn/metrics/tests/test_classification.py
+++ b/sklearn/metrics/tests/test_classification.py
@@ -26,6 +26,7 @@
 from sklearn.utils.testing import assert_not_equal
 from sklearn.utils.testing import ignore_warnings
 from sklearn.utils.mocking import MockDataFrame
+from sklearn.utils.version import parse_version
 
 from sklearn.metrics import accuracy_score
 from sklearn.metrics import average_precision_score
@@ -691,7 +692,7 @@ def test_classification_report_multiclass_with_unicode_label():
 avg / total       0.51      0.53      0.47        75
 """
-    if np_version[:3] < (1, 7, 0):
+    if np_version < parse_version('1.7.0'):
         expected_message = ("NumPy < 1.7.0 does not implement"
                             " searchsorted on unicode data correctly.")
         assert_raise_message(RuntimeError, expected_message,
diff --git a/sklearn/model_selection/_search.py b/sklearn/model_selection/_search.py
index 3b8a0ed882cf5..9d0aedc79762e 100644
--- a/sklearn/model_selection/_search.py
+++ b/sklearn/model_selection/_search.py
@@ -27,7 +27,7 @@
 from ..exceptions import NotFittedError
 from ..externals.joblib import Parallel, delayed
 from ..externals import six
-from ..utils import check_random_state
+from ..utils import check_random_state, parse_version
 from ..utils.fixes import sp_version
 from ..utils.fixes import rankdata
 from ..utils.fixes import MaskedArray
@@ -251,7 +251,7 @@ def __iter__(self):
             params = dict()
             for k, v in items:
                 if hasattr(v, "rvs"):
-                    if sp_version < (0, 16):
+                    if sp_version < parse_version('0.16'):
                         params[k] = v.rvs()
                     else:
                         params[k] = v.rvs(random_state=rnd)
diff --git a/sklearn/model_selection/tests/test_search.py b/sklearn/model_selection/tests/test_search.py
index 055a4c061a7c0..4cccbb5efd80e 100644
--- a/sklearn/model_selection/tests/test_search.py
+++ b/sklearn/model_selection/tests/test_search.py
@@ -25,6 +25,7 @@
 from sklearn.utils.testing import assert_almost_equal
 from sklearn.utils.testing import ignore_warnings
 from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
+from sklearn.utils.version import parse_version
 
 from scipy.stats import bernoulli, expon, uniform
@@ -688,7 +689,7 @@ def test_param_sampler():
                               n_iter=3, random_state=0)
     assert_equal([x for x in sampler], [x for x in sampler])
 
-    if sp_version >= (0, 16):
+    if sp_version >= parse_version('0.16'):
         param_distributions = {"C": uniform(0, 1)}
         sampler = ParameterSampler(param_distributions=param_distributions,
                                    n_iter=10, random_state=0)
diff --git a/sklearn/neighbors/tests/test_dist_metrics.py b/sklearn/neighbors/tests/test_dist_metrics.py
index b5eafb194309e..03d8f0c731032 100644
--- a/sklearn/neighbors/tests/test_dist_metrics.py
+++ b/sklearn/neighbors/tests/test_dist_metrics.py
@@ -9,24 +9,13 @@
 from sklearn.neighbors.dist_metrics import DistanceMetric
 from sklearn.neighbors import BallTree
 from sklearn.utils.testing import SkipTest, assert_raises_regex
+from sklearn.utils.version import parse_version
 
 
 def dist_func(x1, x2, p):
     return np.sum((x1 - x2) ** p) ** (1. / p)
 
 
-def cmp_version(version1, version2):
-    version1 = tuple(map(int, version1.split('.')[:2]))
-    version2 = tuple(map(int, version2.split('.')[:2]))
-
-    if version1 < version2:
-        return -1
-    elif version1 > version2:
-        return 1
-    else:
-        return 0
-
-
 class TestMetrics:
     def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
                  rseed=0, dtype=np.float64):
@@ -70,7 +59,8 @@ def test_cdist(self):
             yield self.check_cdist_bool, metric, D_true
 
     def check_cdist(self, metric, kwargs, D_true):
-        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
+        if metric == 'canberra' and \
+                parse_version(scipy.__version__) <= parse_version('0.9'):
             raise SkipTest("Canberra distance incorrect in scipy < 0.9")
         dm = DistanceMetric.get_metric(metric, **kwargs)
         D12 = dm.pairwise(self.X1, self.X2)
@@ -94,7 +84,8 @@ def test_pdist(self):
             yield self.check_pdist_bool, metric, D_true
 
     def check_pdist(self, metric, kwargs, D_true):
-        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
+        if metric == 'canberra' and \
+                parse_version(scipy.__version__) <= parse_version('0.9'):
             raise SkipTest("Canberra distance incorrect in scipy < 0.9")
         dm = DistanceMetric.get_metric(metric, **kwargs)
         D12 = dm.pairwise(self.X1)
diff --git a/sklearn/preprocessing/label.py b/sklearn/preprocessing/label.py
index f2f7d9afad347..32679f7bb8779 100644
--- a/sklearn/preprocessing/label.py
+++ b/sklearn/preprocessing/label.py
@@ -19,7 +19,7 @@
 from ..utils.fixes import sparse_min_max
 from ..utils.fixes import astype
 from ..utils.fixes import in1d
-from ..utils import column_or_1d
+from ..utils import column_or_1d, parse_version
 from ..utils.validation import check_array
 from ..utils.validation import check_is_fitted
 from ..utils.validation import _num_samples
@@ -47,7 +47,7 @@ def _check_numpy_unicode_bug(labels):
     https://github.com/numpy/numpy/pull/243
 
     """
-    if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
+    if np_version < parse_version('1.7.0') and labels.dtype.kind == 'U':
         raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
                            " on unicode data correctly. Please upgrade"
                            " NumPy to use LabelEncoder with unicode inputs.")
diff --git a/sklearn/preprocessing/tests/test_data.py b/sklearn/preprocessing/tests/test_data.py
index 7a51049b60242..5ab14400fa9b0 100644
--- a/sklearn/preprocessing/tests/test_data.py
+++ b/sklearn/preprocessing/tests/test_data.py
@@ -9,7 +9,6 @@
 import numpy as np
 import numpy.linalg as la
 from scipy import sparse
-from distutils.version import LooseVersion
 
 from sklearn.utils import gen_batches
 
@@ -29,6 +28,7 @@
 from sklearn.utils.testing import assert_no_warnings
 from sklearn.utils.testing import assert_allclose
 from sklearn.utils.testing import skip_if_32bit
+from sklearn.utils.version import parse_version
 
 from sklearn.utils.sparsefuncs import mean_variance_axis
 from sklearn.preprocessing.data import _transform_selected
@@ -206,7 +206,7 @@ def test_standard_scaler_numerical_stability():
     # was empirically found to cause numerical problems with np.mean & np.std.
     x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
 
-    if LooseVersion(np.__version__) >= LooseVersion('1.9'):
+    if parse_version(np.__version__) >= parse_version('1.9'):
         # This does not raise a warning as the number of samples is too low
         # to trigger the problem in recent numpy
         x_scaled = assert_no_warnings(scale, x)
diff --git a/sklearn/utils/__init__.py b/sklearn/utils/__init__.py
index a4e5b6a4f3ea5..d453db0483142 100644
--- a/sklearn/utils/__init__.py
+++ b/sklearn/utils/__init__.py
@@ -13,6 +13,7 @@
                          check_random_state, column_or_1d, check_array,
                          check_consistent_length, check_X_y, indexable,
                          check_symmetric)
+from .version import parse_version
 from .class_weight import compute_class_weight, compute_sample_weight
 from ..externals.joblib import cpu_count
 from ..exceptions import DataConversionWarning
@@ -25,7 +26,7 @@
            "compute_class_weight", "compute_sample_weight",
            "column_or_1d", "safe_indexing",
            "check_consistent_length", "check_X_y", 'indexable',
-           "check_symmetric", "indices_to_mask", "deprecated"]
+           "check_symmetric", "indices_to_mask", "deprecated", "parse_version"]
 
 
 def safe_mask(X, mask):
diff --git a/sklearn/utils/arpack.py b/sklearn/utils/arpack.py
index 04fef0ae0d85f..6b8a0bea7c5be 100644
--- a/sklearn/utils/arpack.py
+++ b/sklearn/utils/arpack.py
@@ -52,7 +52,7 @@
 import scipy
 import functools
 import operator
-from distutils.version import LooseVersion
+from .version import parse_version
 
 __docformat__ = "restructuredtext en"
 
@@ -276,9 +276,9 @@
 
 # CHECK IF BACKPORT IS ACTUALLY NEEDED
-if scipy.version.version >= LooseVersion('0.12'):
+if parse_version(scipy.version.version) >= parse_version('0.12'):
     BACKPORT_TO = None
-elif scipy.version.version >= LooseVersion('0.11'):
+elif parse_version(scipy.version.version) >= parse_version('0.11'):
     BACKPORT_TO = '0.10'
 else:
     BACKPORT_TO = '0.09'
@@ -1853,7 +1853,7 @@ def matvec_XH_X(x):
 
 # Redefine the backported function
-if scipy.version.version >= LooseVersion('0.12'):
+if parse_version(scipy.version.version) >= parse_version('0.12'):
     from scipy.sparse.linalg import eigs, eigsh, svds
 else:
     eigs, eigsh, svds = _eigs, _eigsh, _svds
diff --git a/sklearn/utils/extmath.py b/sklearn/utils/extmath.py
index df1f56dbcb891..33c37bfe15482 100644
--- a/sklearn/utils/extmath.py
+++ b/sklearn/utils/extmath.py
@@ -26,6 +26,7 @@
 from .sparsefuncs_fast import csr_row_norms
 from .validation import check_array
 from ..exceptions import NonBLASDotWarning
+from .version import parse_version
 
 
 def norm(x):
@@ -40,7 +41,7 @@ def norm(x):
 
 
 # Newer NumPy has a ravel that needs less copying.
-if np_version < (1, 7, 1):
+if np_version < parse_version('1.7.1'):
     _ravel = np.ravel
 else:
     _ravel = partial(np.ravel, order='K')
@@ -131,7 +132,7 @@ def _have_blas_gemm():
 
 
 # Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
-if np_version < (1, 7, 2) and _have_blas_gemm():
+if np_version < parse_version('1.7.2') and _have_blas_gemm():
     def fast_dot(A, B):
         """Compute fast dot products directly calling BLAS.
@@ -860,7 +861,7 @@ def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
         Absolute tolerance, see ``np.allclose``
     """
     # sum is as unstable as cumsum for numpy < 1.9
-    if np_version < (1, 9):
+    if np_version < parse_version('1.9'):
         return np.cumsum(arr, axis=axis, dtype=np.float64)
 
     out = np.cumsum(arr, axis=axis, dtype=np.float64)
diff --git a/sklearn/utils/fixes.py b/sklearn/utils/fixes.py
index 7f1fe8eb964ab..28784f77c38d6 100644
--- a/sklearn/utils/fixes.py
+++ b/sklearn/utils/fixes.py
@@ -20,6 +20,8 @@
 import scipy.sparse as sp
 import scipy
 
+from .version import parse_version
+
 try:
     from inspect import signature
 except ImportError:
@@ -37,8 +39,8 @@ def _parse_version(version_string):
     return tuple(version)
 
 
-np_version = _parse_version(np.__version__)
-sp_version = _parse_version(scipy.__version__)
+np_version = parse_version(np.__version__)
+sp_version = parse_version(scipy.__version__)
 
 
 try:
@@ -204,7 +206,7 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
         return np.sort(a, axis=axis, order=order)
 
 
-if np_version < (1, 7):
+if np_version < parse_version('1.7'):
     # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
     def frombuffer_empty(buf, dtype):
         if len(buf) == 0:
@@ -215,7 +217,7 @@ def frombuffer_empty(buf, dtype):
     frombuffer_empty = np.frombuffer
 
 
-if np_version < (1, 8):
+if np_version < parse_version('1.8'):
     def in1d(ar1, ar2, assume_unique=False, invert=False):
         # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
         # Ravel both arrays, behavior for the first array could be different
@@ -260,7 +262,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
     from numpy import in1d
 
 
-if sp_version < (0, 15):
+if sp_version < parse_version('0.15'):
     # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
     from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
 else:
@@ -272,7 +274,7 @@ def parallel_helper(obj, methodname, *args, **kwargs):
     return getattr(obj, methodname)(*args, **kwargs)
 
 
-if np_version < (1, 6, 2):
+if np_version < parse_version('1.6.2'):
     # Allow bincount to accept empty arrays
     # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
     def bincount(x, weights=None, minlength=None):
@@ -310,7 +312,7 @@ def makedirs(name, mode=0o777, exist_ok=False):
             raise
 
 
-if np_version < (1, 8, 1):
+if np_version < parse_version('1.8.1'):
     def array_equal(a1, a2):
         # copy-paste from numpy 1.8.1
         try:
@@ -323,7 +325,7 @@ def array_equal(a1, a2):
 else:
     from numpy import array_equal
 
-if sp_version < (0, 13, 0):
+if sp_version < parse_version('0.13.0'):
     def rankdata(a, method='average'):
         if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
             raise ValueError('unknown method "{0}"'.format(method))
@@ -360,7 +362,7 @@ def rankdata(a, method='average'):
     from scipy.stats import rankdata
 
 
-if np_version < (1, 12):
+if np_version < parse_version('1.12'):
     class MaskedArray(np.ma.MaskedArray):
         # Before numpy 1.12, np.ma.MaskedArray object is not picklable
         # This fix is needed to make our model_selection.GridSearchCV
diff --git a/sklearn/utils/tests/test_extmath.py b/sklearn/utils/tests/test_extmath.py
index 0ba3a9e71e18a..09ede21e6c38d 100644
--- a/sklearn/utils/tests/test_extmath.py
+++ b/sklearn/utils/tests/test_extmath.py
@@ -36,6 +36,7 @@
 from sklearn.utils.extmath import _deterministic_vector_sign_flip
 from sklearn.utils.extmath import softmax
 from sklearn.utils.extmath import stable_cumsum
+from sklearn.utils.version import parse_version
 
 from sklearn.datasets.samples_generator import make_low_rank_matrix
@@ -650,7 +651,7 @@ def test_softmax():
 
 
 def test_stable_cumsum():
-    if np_version < (1, 9):
+    if np_version < parse_version('1.9'):
         raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
     assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
     r = np.random.RandomState(0).rand(100000)
diff --git a/sklearn/utils/tests/test_version.py b/sklearn/utils/tests/test_version.py
new file mode 100644
index 0000000000000..3328b266657b0
--- /dev/null
+++ b/sklearn/utils/tests/test_version.py
@@ -0,0 +1,12 @@
+from sklearn.utils.testing import assert_true
+from sklearn.utils.version import parse_version
+
+
+def test_pre_release():
+    assert_true(parse_version('1.12b1') < parse_version('1.12'))
+    assert_true(parse_version('1.12.0b1') < parse_version('1.12'))
+
+
+def test_cmp():
+    assert_true(parse_version('1.12') < parse_version('1.13'))
+    assert_true(parse_version('1.02') == parse_version('1.2'))
diff --git a/sklearn/utils/version.py b/sklearn/utils/version.py
new file mode 100644
index 0000000000000..84a9da81d6199
--- /dev/null
+++ b/sklearn/utils/version.py
@@ -0,0 +1,361 @@
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+
+__all__ = [
+    "parse_version", "Version", "InvalidVersion", "VERSION_PATTERN"
+]
+
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse_version(version):
+    """
+    Parse the given version string and return a :class:`Version` object.
+    """
+    return Version(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order, make it a tuple, and use that for our
+    # sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
+
+
+class Infinity(object):
+
+    def __repr__(self):
+        return "Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return False
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return True
+
+    def __ge__(self, other):
+        return True
+
+    def __neg__(self):
+        return NegativeInfinity
+
+Infinity = Infinity()
+
+
+class NegativeInfinity(object):
+
+    def __repr__(self):
+        return "-Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return True
+
+    def __le__(self, other):
+        return True
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return False
+
+    def __neg__(self):
+        return Infinity
+
+NegativeInfinity = NegativeInfinity()
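
Usage note (illustrative, not part of the patch): the snippet below sketches how the vendored parse_version helper behaves once this diff is applied. The version strings are arbitrary examples, and scipy is imported only to mirror the sp_version checks rewritten above.

import scipy

from sklearn.utils import parse_version

# PEP 440 ordering: a dev release sorts before a pre-release of the same
# version, which in turn sorts before the final release.
assert parse_version('1.12.dev0') < parse_version('1.12b1') < parse_version('1.12')

# The release segment is compared numerically, so leading and trailing
# zeros do not matter (compare test_cmp in test_version.py above).
assert parse_version('1.02') == parse_version('1.2')
assert parse_version('0.12') == parse_version('0.12.0')

# Tuple comparisons such as ``sp_version < (0, 12)`` become explicit
# comparisons between parsed version strings:
if parse_version(scipy.__version__) < parse_version('0.12'):
    print("scipy.misc.face() is not available; skipping")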