Apply ruff/Perflint rules (PERF) #582

Merged: 2 commits, Oct 6, 2024
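
All of the hunks in this PR are mechanical applications of the same Perflint idea (largely PERF401-style rewrites): a list built by calling append inside a loop becomes a comprehension, and an inner append loop over another iterable becomes an extend call. As a minimal sketch of the pattern, using illustrative names that do not appear in the diff:

# Shape the PERF rules flag: an accumulator filled by append inside a loop.
def collect_squares_loop(values):
    result = []
    for v in values:
        result.append(v * v)
    return result

# Equivalent rewrite as a list comprehension: one expression, no repeated
# lookups of result.append.
def collect_squares(values):
    return [v * v for v in values]

# When the list already exists, extend consumes an iterable in one call
# instead of appending element by element.
def add_squares(existing, values):
    existing.extend(v * v for v in values)
    return existing
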
5 changes: 1 addition & 4 deletions examples/interfaces/process_ds105.py
@@ -308,10 +308,7 @@ def process_subject(ddef, study_def, ana_def):
 
 
 def get_subjects(data_path, subj_ids, study_def, ana_def):
-    ddefs = []
-    for subj_id in subj_ids:
-        ddefs.append(get_fdata(data_path, subj_id))
-    return ddefs
+    return [get_fdata(data_path, subj_id) for subj_id in subj_ids]
 
 
 def main():
3 changes: 1 addition & 2 deletions nipy/algorithms/clustering/gmm.py
@@ -848,8 +848,7 @@ def show_components(self, x, gd, density=None, mpaxes=None):
                      fontsize=12)
 
         legend = ['data']
-        for k in range(self.k):
-            legend.append('component %d' % (k + 1))
+        legend.extend(f'component {k}' for k in range(1, self.k + 1))
         l = ax.legend(tuple(legend))
         for t in l.get_texts():
             t.set_fontsize(12)
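
The renumbering above is the only subtle part: the old loop ran k over range(self.k) and labelled with k + 1, while the new generator runs k over range(1, self.k + 1) directly. A quick check (illustrative, not part of the diff) that both produce the same legend entries:

old = ['component %d' % (k + 1) for k in range(3)]
new = [f'component {k}' for k in range(1, 3 + 1)]
assert old == new == ['component 1', 'component 2', 'component 3']
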
4 changes: 1 addition & 3 deletions nipy/algorithms/clustering/hierarchical_clustering.py
@@ -221,10 +221,8 @@
         way such that parent[i]>i for all i
         Only the leaves are listeed, not the subtrees themselves
         """
-        lst = []
+        lst = [np.array([], np.int_) for i in range(self.V)]
         n = np.sum(self.isleaf())
-        for i in range(self.V):
-            lst.append(np.array([], np.int_))
         for i in range(n):
             lst[i] = np.array([i], np.int_)
         for i in range(self.V - 1):
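
The original method built lst in two passes: append self.V empty arrays, then overwrite the first n slots with singleton arrays. The comprehension replaces only the first pass, so the result is unchanged. A toy sketch of that first pass with made-up sizes:

import numpy as np

V = 5
old = []
for i in range(V):
    old.append(np.array([], np.int_))
new = [np.array([], np.int_) for i in range(V)]
# Both yield V distinct empty integer arrays; the later loops then fill them in.
assert len(old) == len(new) == V and all(a.size == 0 for a in new)
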
4 changes: 1 addition & 3 deletions nipy/algorithms/graph/forest.py
@@ -155,9 +155,7 @@ def get_descendants(self, v, exclude_self=False):
         else:
             desc = [v]
         for w in self.children[v]:
-            temp = self.get_descendants(w)
-            for q in temp:
-                desc.append(q)
+            desc.extend(self.get_descendants(w))
         desc.sort()
         if exclude_self and v in desc:
             desc = [i for i in desc if i != v]
8 changes: 2 additions & 6 deletions nipy/algorithms/graph/graph.py
@@ -1221,9 +1221,7 @@ def left_incidence(self):
         list[[e.0.0, .., e.0.i(0)], .., [e.V.0, E.V.i(V)]] where e.i.j is
         the set of edge indexes so that e.i.j[0] = i
         """
-        linc = []
-        for i in range(self.V):
-            linc.append([])
+        linc = [[] for i in range(self.V)]
         for e in range(self.E):
             i = self.edges[e, 0]
             a = linc[i]
@@ -1240,9 +1238,7 @@ def right_incidence(self):
         list[[e.0.0, .., e.0.i(0)], .., [e.V.0, E.V.i(V)]] where e.i.j is
         the set of edge indexes so that e.i.j[1] = i
         """
-        rinc = []
-        for i in range(self.V):
-            rinc.append([])
+        rinc = [[] for i in range(self.V)]
         for e in range(self.E):
             i = self.edges[e, 1]
             a = rinc[i]
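
One detail worth keeping in mind for both hunks: the comprehension [[] for i in range(self.V)] is the correct replacement, not [[]] * self.V, because each vertex needs its own incidence list. A small illustration with a hypothetical vertex count:

V = 3
shared = [[]] * V                  # V references to one and the same list
distinct = [[] for _ in range(V)]  # V independent lists
shared[0].append(7)
distinct[0].append(7)
assert shared == [[7], [7], [7]]   # every slot changed
assert distinct == [[7], [], []]   # only the intended slot changed
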
16 changes: 6 additions & 10 deletions nipy/algorithms/statistics/formula/formulae.py
@@ -302,11 +302,10 @@ def getparams(expression):
         expression = expression.reshape((np.prod(expression.shape),))
     for term in expression:
         atoms = atoms.union(sympy.sympify(term).atoms())
-    params = []
-    for atom in atoms:
-        if isinstance(atom, sympy.Symbol) and not is_term(atom):
-            params.append(atom)
-    params.sort(key=default_sort_key)
+    params = sorted((atom
+                     for atom in atoms
+                     if isinstance(atom, sympy.Symbol) and not is_term(atom)),
+                    key=default_sort_key)
     return params
 
 
@@ -330,11 +329,8 @@ def getterms(expression):
         expression = expression.reshape((np.prod(expression.shape),))
     for e in expression:
         atoms = atoms.union(e.atoms())
-    terms = []
-    for atom in atoms:
-        if is_term(atom):
-            terms.append(atom)
-    terms.sort(key=default_sort_key)
+    terms = sorted((atom for atom in atoms if is_term(atom)),
+                   key=default_sort_key)
     return terms
 
 
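
Both rewrites lean on the fact that sorted() accepts any iterable, including a generator, and returns a new list, so the filter-append loop and the separate in-place sort() collapse into a single call. A generic illustration (assuming default_sort_key is importable from the top-level sympy namespace, which may vary by version):

import sympy
from sympy import default_sort_key

x, y, a = sympy.symbols('x y a')
atoms = {y, a, x}
params = sorted((s for s in atoms if isinstance(s, sympy.Symbol)),
                key=default_sort_key)
# Plain Symbols compare by name under default_sort_key.
assert params == [a, x, y]
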
4 changes: 1 addition & 3 deletions nipy/algorithms/statistics/models/tests/test_anova.py
@@ -85,9 +85,7 @@
 #
 # http://www-stat.stanford.edu/~jtaylo/courses/stats191/data/kidney.table
 
-D = []
-for row in StringIO(data):
-    D.append([float(val) for val in row.split()])
+D = [[float(val) for val in row.split()] for row in StringIO(data)]
 D = make_recarray(D, ['Days', 'Duration', 'Weight', 'ID'])
 
 # Create the categorical regressors, known as Factors
4 changes: 1 addition & 3 deletions nipy/algorithms/statistics/tests/test_intrinsic_volumes.py
@@ -28,9 +28,7 @@ def randorth(p=10):
 
 def box(shape, edges):
     data = np.zeros(shape)
-    sl = []
-    for i in range(len(shape)):
-        sl.append(slice(edges[i][0], edges[i][1],1))
+    sl = [slice(edges[i][0], edges[i][1],1) for i in range(len(shape))]
     data[tuple(sl)] = 1
     return data.astype(np.int_)
 
4 changes: 1 addition & 3 deletions nipy/algorithms/utils/matrices.py
@@ -105,9 +105,7 @@ def full_rank(X, r=None):
     V, D, U = spl.svd(X, full_matrices=0)
     order = np.argsort(D)
     order = order[::-1]
-    value = []
-    for i in range(r):
-        value.append(V[:,order[i]])
+    value = [V[:,order[i]] for i in range(r)]
     return np.asarray(np.transpose(value)).astype(np.float64)
 
 
5 changes: 1 addition & 4 deletions nipy/interfaces/spm.py
@@ -97,7 +97,4 @@
 
 
 def fnames_presuffix(fnames, prefix='', suffix=''):
-    f2 = []
-    for fname in fnames:
-        f2.append(fname_presuffix(fname, prefix, suffix))
-    return f2
+    return [fname_presuffix(fname, prefix, suffix) for fname in fnames]
8 changes: 4 additions & 4 deletions nipy/io/nifti_ref.py
@@ -345,10 +345,10 @@ def nipy2nifti(img, data_dtype=None, strict=None, fix0=True):
     # Use list() to get .index method for python < 2.6
     input_names = list(coordmap.function_domain.coord_names)
     spatial_names = input_names[:3]
-    dim_infos = []
-    for fps in 'freq', 'phase', 'slice':
-        dim_infos.append(
-            spatial_names.index(fps) if fps in spatial_names else None)
+    dim_infos = [
+        spatial_names.index(fps) if fps in spatial_names else None
+        for fps in ('freq', 'phase', 'slice')
+    ]
     hdr.set_dim_info(*dim_infos)
     # Set units without knowing time
     hdr.set_xyzt_units(xyz='mm')
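
Note that this comprehension uses a conditional expression rather than a trailing if filter, so a name that is absent still contributes a None placeholder and each of the three slots keeps its positional meaning (freq, phase, slice). A toy run with made-up axis names:

spatial_names = ['phase', 'i', 'slice']
dim_infos = [
    spatial_names.index(fps) if fps in spatial_names else None
    for fps in ('freq', 'phase', 'slice')
]
assert dim_infos == [None, 0, 2]
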
9 changes: 5 additions & 4 deletions nipy/labs/utils/reproducibility_measures.py
@@ -128,10 +128,11 @@ def get_cluster_position_from_thresholded_map(stat_map, domain, thr=3.0,
     coord = thresholded_domain.get_coord()
 
     # get the barycenters
-    baryc = []
-    for i in range(label.max() + 1):
-        if np.sum(label == i) >= csize:
-            baryc.append(np.mean(coord[label == i], 0))
+    baryc = [
+        np.mean(coord[label == i], 0)
+        for i in range(label.max() + 1)
+        if np.sum(label == i) >= csize
+    ]
 
     if len(baryc) == 0:
         return None
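
In contrast to the nifti_ref.py hunk above, the trailing if here is a filter: clusters smaller than csize simply drop out of baryc rather than leaving a placeholder. A short sketch with synthetic labels and coordinates:

import numpy as np

label = np.array([0, 0, 0, 1, 2, 2])
coord = np.arange(12, dtype=float).reshape(6, 2)
csize = 2
baryc = [
    np.mean(coord[label == i], 0)
    for i in range(label.max() + 1)
    if np.sum(label == i) >= csize
]
# Label 1 occurs only once, so only clusters 0 and 2 contribute a barycenter.
assert len(baryc) == 2
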
4 changes: 1 addition & 3 deletions nipy/modalities/fmri/design_matrix.py
@@ -131,9 +131,7 @@ def _make_drift(drift_model, frametimes, order=1, hfcut=128.):
         drift = _blank_drift(frametimes)
     else:
         raise NotImplementedError(f"Unknown drift model {drift_model!r}")
-    names = []
-    for k in range(drift.shape[1] - 1):
-        names.append('drift_%d' % (k + 1))
+    names = [f'drift_{k}' for k in range(1, drift.shape[1])]
     names.append('constant')
     return drift, names
 
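The ruff configuration change that actually enables the PERF rule family is not among the files shown above; presumably it adds "PERF" to the lint select list (for example under [tool.ruff.lint] in pyproject.toml), but that hunk did not load in this view.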