MNT Enforce ruff rules (RUF) #30694

Merged · 9 commits · Mar 18, 2025

Changes from all commits

6 changes: 2 additions & 4 deletions build_tools/circle/list_versions.py

@@ -71,10 +71,8 @@ def get_file_size(version):
     "Web-based documentation is available for versions listed below:\n",
 ]
 
-ROOT_URL = (
-    "https://api.github.com/repos/scikit-learn/scikit-learn.github.io/contents/"  # noqa
-)
-RAW_FMT = "https://raw.githubusercontent.com/scikit-learn/scikit-learn.github.io/master/%s/index.html"  # noqa
+ROOT_URL = "https://api.github.com/repos/scikit-learn/scikit-learn.github.io/contents/"
+RAW_FMT = "https://raw.githubusercontent.com/scikit-learn/scikit-learn.github.io/master/%s/index.html"
 VERSION_RE = re.compile(r"scikit-learn ([\w\.\-]+) documentation</title>")
 NAMED_DIRS = ["dev", "stable"]

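Note: many hunks in this PR simply drop `# noqa` comments like the two above. With the RUF family selected, ruff's unused-noqa check (RUF100, presumably the trigger here) flags suppressions that no longer silence any active violation. A minimal sketch with a hypothetical module:

    # This short line never violates E501, so the directive is dead weight;
    # ruff reports it and `ruff check --fix` deletes the comment.
    URL = "https://example.com"  # noqa: E501
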
4 changes: 2 additions & 2 deletions build_tools/update_environments_and_lock_files.py

@@ -643,9 +643,9 @@ def write_pip_lock_file(build_metadata):
 
     json_output = execute_command(["conda", "info", "--json"])
     conda_info = json.loads(json_output)
-    environment_folder = [
+    environment_folder = next(
         each for each in conda_info["envs"] if each.endswith(environment_name)
-    ][0]
+    )
     environment_path = Path(environment_folder)
     pip_compile_path = environment_path / "bin" / "pip-compile"

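This is the rewrite RUF015 (unnecessary-iterable-allocation-for-first-element) prescribes; note the rule itself lands in the pyproject.toml ignore list below, so call sites like this one were fixed individually. A minimal sketch with hypothetical data:

    envs = ["/opt/conda", "/opt/conda/envs/build", "/opt/conda/envs/doc"]

    # Flagged style: materializes the whole list just to take element 0.
    first = [e for e in envs if e.endswith("doc")][0]

    # Preferred style: the generator stops at the first match, and a
    # missing match raises StopIteration instead of IndexError.
    first = next(e for e in envs if e.endswith("doc"))
    assert first == "/opt/conda/envs/doc"
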
@@ -143,7 +143,7 @@
 for idx, result in enumerate(results):
     cv_results = result["cv_results"].round(3)
     model_name = result["model"]
-    param_name = list(param_grids[model_name].keys())[0]
+    param_name = next(iter(param_grids[model_name].keys()))
     cv_results[param_name] = cv_results["param_" + param_name]
     cv_results["model"] = model_name

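Same RUF015 fix; since iterating a dict already yields its keys, the `.keys()` call could be dropped as well. A small sketch with a hypothetical grid:

    param_grids = {"ridge": {"alpha": [0.1, 1.0, 10.0]}}
    # iter(d.keys()) and iter(d) walk the same keys in the same order.
    assert next(iter(param_grids["ridge"].keys())) == "alpha"
    assert next(iter(param_grids["ridge"])) == "alpha"
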
2 changes: 1 addition & 1 deletion examples/linear_model/plot_sgdocsvm_vs_ocsvm.py

@@ -17,7 +17,7 @@
 benefits of such an approximation in terms of computation time but rather to
 show that we obtain similar results on a toy dataset.
 
-"""  # noqa: E501
+"""
 
 # Authors: The scikit-learn developers
 # SPDX-License-Identifier: BSD-3-Clause

4 changes: 2 additions & 2 deletions examples/model_selection/plot_grid_search_stats.py

@@ -230,8 +230,8 @@ def compute_corrected_ttest(differences, df, n_train, n_test):
 
 n = differences.shape[0]  # number of test sets
 df = n - 1
-n_train = len(list(cv.split(X, y))[0][0])
-n_test = len(list(cv.split(X, y))[0][1])
+n_train = len(next(iter(cv.split(X, y)))[0])
+n_test = len(next(iter(cv.split(X, y)))[1])
 
 t_stat, p_val = compute_corrected_ttest(differences, df, n_train, n_test)
 print(f"Corrected t-value: {t_stat:.3f}\nCorrected p-value: {p_val:.3f}")

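RUF015 again, applied to a cross-validation splitter: `list(cv.split(X, y))[0]` materializes every split up front, while `next(iter(...))` pulls only the first (train, test) pair. A runnable sketch, assuming scikit-learn and NumPy are installed:

    import numpy as np
    from sklearn.model_selection import KFold

    X, y = np.arange(20).reshape(10, 2), np.arange(10)
    cv = KFold(n_splits=5)
    # cv.split returns a generator; consume only its first item.
    train_idx, test_idx = next(iter(cv.split(X, y)))
    print(len(train_idx), len(test_idx))  # 8 2
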
2 changes: 1 addition & 1 deletion examples/neighbors/plot_species_kde.py

@@ -33,7 +33,7 @@
 <http://rob.schapire.net/papers/ecolmod.pdf>`_
 S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
 190:231-259, 2006.
-"""  # noqa: E501
+"""
 
 # Authors: The scikit-learn developers
 # SPDX-License-Identifier: BSD-3-Clause

9 changes: 8 additions & 1 deletion pyproject.toml

@@ -148,7 +148,7 @@ preview = true
 # This enables us to use the explicit preview rules that we want only
 explicit-preview-rules = true
 # all rules can be found here: https://beta.ruff.rs/docs/rules/
-select = ["E", "F", "W", "I", "CPY001"]
+select = ["E", "F", "W", "I", "CPY001", "RUF"]
 ignore=[
     # space before : (needed for how black formats slicing)
     "E203",
@@ -163,6 +163,13 @@ ignore=[
     # F841 is in preview (july 2024), and we don't care much about it.
     # Local variable ... is assigned to but never used
     "F841",
+    # some RUF rules trigger too many changes
+    "RUF002",
+    "RUF003",
+    "RUF005",
+    "RUF012",
+    "RUF015",
+    "RUF021",
 ]
 
 [tool.ruff.lint.flake8-copyright]

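For context, a sketch of what two of the ignored rules would flag (RUF002/RUF003 concern ambiguous Unicode characters in docstrings and comments; RUF015 and RUF021 are illustrated alongside other hunks in this diff). The names below are hypothetical:

    from typing import Any, ClassVar

    base = [1, 2]
    combined = base + [3]  # RUF005 would suggest: [*base, 3]
    combined = [*base, 3]

    class Config:
        # RUF012 wants mutable class attributes annotated with ClassVar;
        # a bare `defaults = {}` would be flagged.
        defaults: ClassVar[dict[str, Any]] = {}
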
12 changes: 6 additions & 6 deletions sklearn/_loss/__init__.py

@@ -20,14 +20,14 @@
 )
 
 __all__ = [
-    "HalfSquaredError",
     "AbsoluteError",
-    "PinballLoss",
-    "HuberLoss",
-    "HalfPoissonLoss",
+    "HalfBinomialLoss",
     "HalfGammaLoss",
+    "HalfMultinomialLoss",
+    "HalfPoissonLoss",
+    "HalfSquaredError",
     "HalfTweedieLoss",
     "HalfTweedieLossIdentity",
-    "HalfBinomialLoss",
-    "HalfMultinomialLoss",
+    "HuberLoss",
+    "PinballLoss",
 ]

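This and the remaining `__all__` hunks apply ruff's isort-style sorting for `__all__` (presumably RUF022, a preview rule, so likely applied explicitly): ALL-CAPS names sort before CamelCase, which sorts before snake_case. A condensed sketch of the ordering the diffs converge on, with a hypothetical module:

    __all__ = [
        "DBSCAN",  # ALL-CAPS acronyms first
        "KMeans",  # then CamelCase
        "k_means",  # then lowercase / snake_case
    ]
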
18 changes: 9 additions & 9 deletions sklearn/cluster/__init__.py

@@ -26,21 +26,24 @@
 from ._spectral import SpectralClustering, spectral_clustering
 
 __all__ = [
+    "DBSCAN",
+    "HDBSCAN",
+    "OPTICS",
     "AffinityPropagation",
     "AgglomerativeClustering",
     "Birch",
-    "DBSCAN",
-    "OPTICS",
-    "cluster_optics_dbscan",
-    "cluster_optics_xi",
-    "compute_optics_graph",
-    "KMeans",
     "BisectingKMeans",
     "FeatureAgglomeration",
+    "KMeans",
     "MeanShift",
     "MiniBatchKMeans",
+    "SpectralBiclustering",
     "SpectralClustering",
+    "SpectralCoclustering",
     "affinity_propagation",
+    "cluster_optics_dbscan",
+    "cluster_optics_xi",
+    "compute_optics_graph",
     "dbscan",
     "estimate_bandwidth",
     "get_bin_seeds",
@@ -50,7 +53,4 @@
     "mean_shift",
     "spectral_clustering",
     "ward_tree",
-    "SpectralBiclustering",
-    "SpectralCoclustering",
-    "HDBSCAN",
 ]

2 changes: 1 addition & 1 deletion sklearn/cluster/_bicluster.py

@@ -18,7 +18,7 @@
 from ..utils.validation import assert_all_finite, validate_data
 from ._kmeans import KMeans, MiniBatchKMeans
 
-__all__ = ["SpectralCoclustering", "SpectralBiclustering"]
+__all__ = ["SpectralBiclustering", "SpectralCoclustering"]
 
 
 def _scale_normalize(X):

2 changes: 1 addition & 1 deletion sklearn/compose/__init__.py

@@ -17,7 +17,7 @@
 
 __all__ = [
     "ColumnTransformer",
-    "make_column_transformer",
     "TransformedTargetRegressor",
     "make_column_selector",
+    "make_column_transformer",
 ]

8 changes: 3 additions & 5 deletions sklearn/compose/_column_transformer.py

@@ -50,7 +50,7 @@
     check_is_fitted,
 )
 
-__all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"]
+__all__ = ["ColumnTransformer", "make_column_selector", "make_column_transformer"]
 
 
 _ERR_MSG_1DCOLUMN = (
@@ -1352,10 +1352,8 @@ def _is_empty_column_selection(column):
     if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_):
         return not column.any()
     elif hasattr(column, "__len__"):
-        return (
-            len(column) == 0
-            or all(isinstance(col, bool) for col in column)
-            and not any(column)
+        return len(column) == 0 or (
+            all(isinstance(col, bool) for col in column) and not any(column)
         )
     else:
         return False

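The `_is_empty_column_selection` rewrite (and the similar one in sklearn/datasets/_svmlight_format_io.py below) only adds parentheses: `and` binds tighter than `or`, so the grouped form is behavior-preserving while making the precedence visible. That is the pattern RUF021 targets, even though the rule itself sits in the ignore list above. A minimal check:

    a, b, c = True, False, False
    # `x or y and z` parses as `x or (y and z)`, never `(x or y) and z`.
    assert (a or b and c) == (a or (b and c))  # short-circuits on `a`
    assert (a or b and c) != ((a or b) and c)  # regrouping changes the result
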
2 changes: 1 addition & 1 deletion sklearn/compose/tests/test_column_transformer.py

@@ -361,7 +361,7 @@ def test_column_transformer_empty_columns(pandas, column_selection, callable_col
         X = X_array
 
     if callable_column:
-        column = lambda X: column_selection  # noqa
+        column = lambda X: column_selection
     else:
         column = column_selection

2 changes: 1 addition & 1 deletion sklearn/conftest.py

@@ -185,7 +185,7 @@ def pytest_collection_modifyitems(config, items):
             marker = pytest.mark.xfail(
                 reason=(
                     "know failure. See "
-                    "https://github.com/scikit-learn/scikit-learn/issues/17797"  # noqa
+                    "https://github.com/scikit-learn/scikit-learn/issues/17797"
                 )
             )
             item.add_marker(marker)

2 changes: 1 addition & 1 deletion sklearn/covariance/__init__.py

@@ -27,13 +27,13 @@
 )
 
 __all__ = [
+    "OAS",
     "EllipticEnvelope",
     "EmpiricalCovariance",
     "GraphicalLasso",
     "GraphicalLassoCV",
     "LedoitWolf",
     "MinCovDet",
-    "OAS",
     "ShrunkCovariance",
     "empirical_covariance",
     "fast_mcd",

2 changes: 1 addition & 1 deletion sklearn/cross_decomposition/__init__.py

@@ -5,4 +5,4 @@
 
 from ._pls import CCA, PLSSVD, PLSCanonical, PLSRegression
 
-__all__ = ["PLSCanonical", "PLSRegression", "PLSSVD", "CCA"]
+__all__ = ["CCA", "PLSSVD", "PLSCanonical", "PLSRegression"]

2 changes: 1 addition & 1 deletion sklearn/cross_decomposition/_pls.py

@@ -27,7 +27,7 @@
 from ..utils.fixes import parse_version, sp_version
 from ..utils.validation import FLOAT_DTYPES, check_is_fitted, validate_data
 
-__all__ = ["PLSCanonical", "PLSRegression", "PLSSVD"]
+__all__ = ["PLSSVD", "PLSCanonical", "PLSRegression"]
 
 
 if sp_version >= parse_version("1.7"):

14 changes: 7 additions & 7 deletions sklearn/datasets/__init__.py

@@ -61,22 +61,22 @@
     "dump_svmlight_file",
     "fetch_20newsgroups",
     "fetch_20newsgroups_vectorized",
+    "fetch_california_housing",
+    "fetch_covtype",
     "fetch_file",
+    "fetch_kddcup99",
     "fetch_lfw_pairs",
     "fetch_lfw_people",
     "fetch_olivetti_faces",
-    "fetch_species_distributions",
-    "fetch_california_housing",
-    "fetch_covtype",
-    "fetch_rcv1",
-    "fetch_kddcup99",
     "fetch_openml",
+    "fetch_rcv1",
+    "fetch_species_distributions",
     "get_data_home",
+    "load_breast_cancer",
     "load_diabetes",
     "load_digits",
     "load_files",
     "load_iris",
-    "load_breast_cancer",
     "load_linnerud",
     "load_sample_image",
     "load_sample_images",
@@ -85,9 +85,9 @@
     "load_wine",
     "make_biclusters",
     "make_blobs",
+    "make_checkerboard",
     "make_circles",
     "make_classification",
-    "make_checkerboard",
     "make_friedman1",
     "make_friedman2",
     "make_friedman3",

2 changes: 1 addition & 1 deletion sklearn/datasets/_kddcup99.py

@@ -376,7 +376,7 @@ def _fetch_brute_kddcup99(
         except Exception as e:
             raise OSError(
                 "The cache for fetch_kddcup99 is invalid, please delete "
-                f"{str(kddcup_dir)} and run the fetch_kddcup99 again"
+                f"{kddcup_dir} and run the fetch_kddcup99 again"
             ) from e
 
     elif download_if_missing:

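Dropping `str()` inside the f-string is behavior-preserving, since f-string interpolation already applies `str()` by default; this is the redundancy RUF010 targets (its autofix writes `{kddcup_dir!s}`, which is equivalent). A quick sketch with a hypothetical path:

    from pathlib import Path

    cache_dir = Path("/tmp/kddcup99")
    assert f"{str(cache_dir)}" == f"{cache_dir}" == f"{cache_dir!s}"
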
2 changes: 1 addition & 1 deletion sklearn/datasets/_openml.py

@@ -20,7 +20,7 @@
 import numpy as np
 
 from ..utils import Bunch
-from ..utils._optional_dependencies import check_pandas_support  # noqa
+from ..utils._optional_dependencies import check_pandas_support
 from ..utils._param_validation import (
     Integral,
     Interval,

6 changes: 2 additions & 4 deletions sklearn/datasets/_svmlight_format_io.py

@@ -384,10 +384,8 @@ def get_data():
         for f in files
     ]
 
-    if (
-        zero_based is False
-        or zero_based == "auto"
-        and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r)
+    if zero_based is False or (
+        zero_based == "auto" and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r)
     ):
         for _, indices, _, _, _ in r:
             indices -= 1

2 changes: 1 addition & 1 deletion sklearn/datasets/tests/test_kddcup99.py

@@ -82,7 +82,7 @@ def test_corrupted_file_error_message(fetch_kddcup99_fxt, tmp_path):
 
     msg = (
         "The cache for fetch_kddcup99 is invalid, please "
-        f"delete {str(kddcup99_dir)} and run the fetch_kddcup99 again"
+        f"delete {kddcup99_dir} and run the fetch_kddcup99 again"
    )
 
     with pytest.raises(OSError, match=msg):

10 changes: 5 additions & 5 deletions sklearn/decomposition/__init__.py

@@ -31,24 +31,24 @@
 from ._truncated_svd import TruncatedSVD
 
 __all__ = [
+    "NMF",
+    "PCA",
     "DictionaryLearning",
+    "FactorAnalysis",
     "FastICA",
     "IncrementalPCA",
     "KernelPCA",
+    "LatentDirichletAllocation",
     "MiniBatchDictionaryLearning",
     "MiniBatchNMF",
     "MiniBatchSparsePCA",
-    "NMF",
-    "PCA",
     "SparseCoder",
     "SparsePCA",
+    "TruncatedSVD",
     "dict_learning",
     "dict_learning_online",
     "fastica",
     "non_negative_factorization",
     "randomized_svd",
     "sparse_encode",
-    "FactorAnalysis",
-    "TruncatedSVD",
-    "LatentDirichletAllocation",
 ]

2 changes: 1 addition & 1 deletion sklearn/decomposition/_fastica.py

@@ -25,7 +25,7 @@
 from ..utils._param_validation import Interval, Options, StrOptions, validate_params
 from ..utils.validation import check_is_fitted, validate_data
 
-__all__ = ["fastica", "FastICA"]
+__all__ = ["FastICA", "fastica"]
 
 
 def _gs_decorrelation(w, W, j):

2 changes: 1 addition & 1 deletion sklearn/decomposition/tests/test_fastica.py

@@ -80,7 +80,7 @@ def test_fastica_simple(add_noise, global_random_seed, global_dtype):
         pytest.xfail(
             "FastICA instability with Ubuntu Atlas build with float32 "
             "global_dtype. For more details, see "
-            "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119"  # noqa
+            "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119"
         )
 
     # Test the FastICA algorithm on very simple data.

24 changes: 12 additions & 12 deletions sklearn/ensemble/__init__.py

@@ -23,23 +23,23 @@
 from ._weight_boosting import AdaBoostClassifier, AdaBoostRegressor
 
 __all__ = [
+    "AdaBoostClassifier",
+    "AdaBoostRegressor",
+    "BaggingClassifier",
+    "BaggingRegressor",
     "BaseEnsemble",
-    "RandomForestClassifier",
-    "RandomForestRegressor",
-    "RandomTreesEmbedding",
     "ExtraTreesClassifier",
     "ExtraTreesRegressor",
-    "BaggingClassifier",
-    "BaggingRegressor",
-    "IsolationForest",
     "GradientBoostingClassifier",
     "GradientBoostingRegressor",
-    "AdaBoostClassifier",
-    "AdaBoostRegressor",
-    "VotingClassifier",
-    "VotingRegressor",
-    "StackingClassifier",
-    "StackingRegressor",
     "HistGradientBoostingClassifier",
     "HistGradientBoostingRegressor",
+    "IsolationForest",
+    "RandomForestClassifier",
+    "RandomForestRegressor",
+    "RandomTreesEmbedding",
+    "StackingClassifier",
+    "StackingRegressor",
+    "VotingClassifier",
+    "VotingRegressor",
 ]