From b41f81eb7008cfbc00e1d0c49a56e3b0ba2af137 Mon Sep 17 00:00:00 2001
From: Sahil Kang
Date: Wed, 22 Jun 2016 23:13:38 -0700
Subject: [PATCH] Use logging.info instead of print (#6929)

---
 sklearn/datasets/california_housing.py    | 3 ++-
 sklearn/datasets/kddcup99.py              | 2 +-
 sklearn/datasets/olivetti_faces.py        | 3 ++-
 sklearn/datasets/species_distributions.py | 7 ++++---
 sklearn/datasets/twenty_newsgroups.py     | 8 ++++----
 5 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/sklearn/datasets/california_housing.py b/sklearn/datasets/california_housing.py
index ee1bf5bde4514..d6cdc0414143b 100644
--- a/sklearn/datasets/california_housing.py
+++ b/sklearn/datasets/california_housing.py
@@ -26,6 +26,7 @@
 from os.path import exists
 from os import makedirs
 import tarfile
+import logging
 
 try:
     # Python 2
@@ -90,7 +91,7 @@ def fetch_california_housing(data_home=None, download_if_missing=True):
         makedirs(data_home)
     filepath = _pkl_filepath(data_home, TARGET_FILENAME)
     if not exists(filepath):
-        print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
+        logging.info('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
         archive_fileobj = BytesIO(urlopen(DATA_URL).read())
         fileobj = tarfile.open(
             mode="r:gz",
diff --git a/sklearn/datasets/kddcup99.py b/sklearn/datasets/kddcup99.py
index 06b97da950b8d..ea18700511483 100644
--- a/sklearn/datasets/kddcup99.py
+++ b/sklearn/datasets/kddcup99.py
@@ -329,7 +329,7 @@ def _fetch_brute_kddcup99(subset=None, data_home=None,
             line = line.decode()
             Xy.append(line.replace('\n', '').split(','))
         file_.close()
-        print('extraction done')
+        logging.info('extraction done')
         Xy = np.asarray(Xy, dtype=object)
         for j in range(42):
             Xy[:, j] = Xy[:, j].astype(DT[j])
diff --git a/sklearn/datasets/olivetti_faces.py b/sklearn/datasets/olivetti_faces.py
index 978a00db3ca14..dfbc09ff8a89b 100644
--- a/sklearn/datasets/olivetti_faces.py
+++ b/sklearn/datasets/olivetti_faces.py
@@ -25,6 +25,7 @@
 from io import BytesIO
 from os.path import exists
 from os import makedirs
+import logging
 try:
     # Python 2
     import urllib2
@@ -111,7 +112,7 @@ def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
         makedirs(data_home)
     filepath = _pkl_filepath(data_home, TARGET_FILENAME)
     if not exists(filepath):
-        print('downloading Olivetti faces from %s to %s'
+        logging.info('downloading Olivetti faces from %s to %s'
               % (DATA_URL, data_home))
         fhandle = urlopen(DATA_URL)
         buf = BytesIO(fhandle.read())
diff --git a/sklearn/datasets/species_distributions.py b/sklearn/datasets/species_distributions.py
index 3d10528e7b682..a6507e295d1ad 100644
--- a/sklearn/datasets/species_distributions.py
+++ b/sklearn/datasets/species_distributions.py
@@ -38,6 +38,7 @@
 from io import BytesIO
 from os import makedirs
 from os.path import exists
+import logging
 
 try:
     # Python 2
@@ -222,7 +223,7 @@ def fetch_species_distributions(data_home=None,
     archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
 
     if not exists(archive_path):
-        print('Downloading species data from %s to %s' % (SAMPLES_URL,
+        logging.info('Downloading species data from %s to %s' % (SAMPLES_URL,
                                                           data_home))
         X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
 
@@ -233,7 +234,7 @@ def fetch_species_distributions(data_home=None,
             if 'test' in f:
                 test = _load_csv(fhandle)
 
-        print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
+        logging.info('Downloading coverage data from %s to %s' % (COVERAGES_URL,
                                                            data_home))
 
         X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
@@ -241,7 +242,7 @@ def fetch_species_distributions(data_home=None,
         coverages = []
         for f in X.files:
             fhandle = BytesIO(X[f])
-            print(' - converting', f)
+            logging.info(' - converting %s', f)
             coverages.append(_load_coverage(fhandle))
         coverages = np.asarray(coverages, dtype=dtype)
 
diff --git a/sklearn/datasets/twenty_newsgroups.py b/sklearn/datasets/twenty_newsgroups.py
index 524d38d6c1ec1..bc1e9a69fabe8 100644
--- a/sklearn/datasets/twenty_newsgroups.py
+++ b/sklearn/datasets/twenty_newsgroups.py
@@ -212,10 +212,10 @@ def fetch_20newsgroups(data_home=None, subset='train', categories=None,
                 compressed_content, 'zlib_codec')
             cache = pickle.loads(uncompressed_content)
         except Exception as e:
-            print(80 * '_')
-            print('Cache loading failed')
-            print(80 * '_')
-            print(e)
+            logging.info(80 * '_')
+            logging.info('Cache loading failed')
+            logging.info(80 * '_')
+            logging.info(e)
 
     if cache is None:
         if download_if_missing:
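
Reviewer note: unlike print, logging.info emits nothing under Python's
default logging configuration (only WARNING and above reach stderr), so
these download messages become invisible unless the caller opts in. A
minimal sketch of how a user could surface them; the basicConfig call is
an assumption about the caller's setup, not part of this patch:

    import logging

    # Route INFO-level records from the root logger to stderr; without
    # this, the messages introduced above are silently dropped.
    logging.basicConfig(level=logging.INFO)

    from sklearn.datasets import fetch_california_housing

    # On a cold cache this logs, e.g.:
    #   INFO:root:downloading Cal. housing from <DATA_URL> to <data_home>
    housing = fetch_california_housing()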