-
-
Notifications
You must be signed in to change notification settings - Fork 25.8k
CI Uses pytest-xdist to parallelize tests #13041
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
4a60403
38f47ab
f42dae4
75693c4
b3fef51
360daa0
765d11b
1b4a82e
271ec28
919a451
4294882
7c573e8
8b68c73
4e5e32c
5cf9558
a5311f6
4badaa9
4957cb0
3b72e69
36d97bb
70a4077
e4c49c5
0b21946
b32f961
65715b3
d6a96e4
bb65b3e
d98bf54
f749e84
ca5a880
1e0d96b
e002e27
6b964da
15222ab
bcc68b1
1c06910
969fb4e
fff1fda
9f7992f
0d9d945
4bfb9b9
4dc91ec
448d2e1
624b25f
1d4d807
57070e7
7ad5858
fac786d
4abc3f7
b4b5a2f
9ab7c5d
c4eb6e7
692d532
78b4985
d656b1e
5e686f7
6532e4e
d4b0529
70ec9cd
376fd62
3289689
6534ec6
fe39d43
2a11e7a
85052e8
97f71b2
a21e544
7a8210d
69f4880
94efac8
ceb1a99
950ae7c
b90043b
64b333c
c92b7aa
320a194
b03dac1
20e6429
f5a4b74
a9d0259
fcb3c92
085805b
fd090b7
11588a3
014b61c
59950e3
c6230a9
d8dd551
d1150aa
48a6190
46c6459
2f9f265
01743fc
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,8 +1,9 @@ | ||
@echo on | ||
|
||
@rem Only 64 bit uses conda | ||
@rem Only 64 bit uses conda and uses a python newer than 3.5 | ||
IF "%PYTHON_ARCH%"=="64" ( | ||
call activate %VIRTUALENV% | ||
set PYTEST_ARGS=%PYTEST_ARGS% -n2 | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Could you briefly describe `-n2` for future readers? |
||
) | ||
|
||
mkdir %TMP_FOLDER% | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -509,7 +509,7 @@ def test_symmetry(): | |
|
||
@pytest.mark.parametrize( | ||
'name', | ||
set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS) | ||
sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)) | ||
def test_sample_order_invariance(name): | ||
random_state = check_random_state(0) | ||
y_true = random_state.randint(0, 2, size=(20, )) | ||
|
@@ -561,7 +561,7 @@ def test_sample_order_invariance_multilabel_and_multioutput(): | |
|
||
@pytest.mark.parametrize( | ||
'name', | ||
set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS) | ||
sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)) | ||
def test_format_invariance_with_1d_vectors(name): | ||
random_state = check_random_state(0) | ||
y1 = random_state.randint(0, 2, size=(20, )) | ||
|
@@ -636,8 +636,8 @@ def test_format_invariance_with_1d_vectors(name): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', | ||
set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS) | ||
'name', | ||
sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)) | ||
def test_classification_invariance_string_vs_numbers_labels(name): | ||
# Ensure that classification metrics with string labels are invariant | ||
random_state = check_random_state(0) | ||
|
@@ -767,22 +767,23 @@ def check_single_sample_multioutput(name): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', | ||
(set(ALL_METRICS) | ||
# Those metrics are not always defined with one sample | ||
# or in multiclass classification | ||
- METRIC_UNDEFINED_BINARY_MULTICLASS | ||
- set(THRESHOLDED_METRICS))) | ||
'name', | ||
sorted( | ||
set(ALL_METRICS) | ||
# Those metrics are not always defined with one sample | ||
# or in multiclass classification | ||
- METRIC_UNDEFINED_BINARY_MULTICLASS - set(THRESHOLDED_METRICS))) | ||
def test_single_sample(name): | ||
check_single_sample(name) | ||
|
||
|
||
@pytest.mark.parametrize('name', MULTIOUTPUT_METRICS | MULTILABELS_METRICS) | ||
@pytest.mark.parametrize('name', | ||
sorted(MULTIOUTPUT_METRICS | MULTILABELS_METRICS)) | ||
def test_single_sample_multioutput(name): | ||
check_single_sample_multioutput(name) | ||
|
||
|
||
@pytest.mark.parametrize('name', MULTIOUTPUT_METRICS) | ||
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS)) | ||
def test_multioutput_number_of_output_differ(name): | ||
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) | ||
y_pred = np.array([[0, 0], [1, 0], [0, 0]]) | ||
|
@@ -791,7 +792,7 @@ def test_multioutput_number_of_output_differ(name): | |
assert_raises(ValueError, metric, y_true, y_pred) | ||
|
||
|
||
@pytest.mark.parametrize('name', MULTIOUTPUT_METRICS) | ||
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS)) | ||
def test_multioutput_regression_invariance_to_dimension_shuffling(name): | ||
# test invariance to dimension shuffling | ||
random_state = check_random_state(0) | ||
|
@@ -846,7 +847,7 @@ def test_multilabel_representation_invariance(): | |
"dense and sparse indicator formats." % name) | ||
|
||
|
||
@pytest.mark.parametrize('name', MULTILABELS_METRICS) | ||
@pytest.mark.parametrize('name', sorted(MULTILABELS_METRICS)) | ||
def test_raise_value_error_multilabel_sequences(name): | ||
# make sure the multilabel-sequence format raises ValueError | ||
multilabel_sequences = [ | ||
|
@@ -862,7 +863,7 @@ def test_raise_value_error_multilabel_sequences(name): | |
assert_raises(ValueError, metric, seq, seq) | ||
|
||
|
||
@pytest.mark.parametrize('name', METRICS_WITH_NORMALIZE_OPTION) | ||
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION)) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is there a reason not to order the dicts? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
def test_normalize_option_binary_classification(name): | ||
# Test in the binary case | ||
n_samples = 20 | ||
|
@@ -879,7 +880,7 @@ def test_normalize_option_binary_classification(name): | |
measure) | ||
|
||
|
||
@pytest.mark.parametrize('name', METRICS_WITH_NORMALIZE_OPTION) | ||
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION)) | ||
def test_normalize_option_multiclass_classification(name): | ||
# Test in the multiclass case | ||
random_state = check_random_state(0) | ||
|
@@ -986,7 +987,7 @@ def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, | |
raise ValueError("Metric is not recorded as having an average option") | ||
|
||
|
||
@pytest.mark.parametrize('name', METRICS_WITH_AVERAGING) | ||
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING)) | ||
def test_averaging_multiclass(name): | ||
n_samples, n_classes = 50, 3 | ||
random_state = check_random_state(0) | ||
|
@@ -1003,7 +1004,8 @@ def test_averaging_multiclass(name): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING) | ||
'name', | ||
sorted(METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING)) | ||
def test_averaging_multilabel(name): | ||
n_samples, n_classes = 40, 5 | ||
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes, | ||
|
@@ -1019,7 +1021,7 @@ def test_averaging_multilabel(name): | |
y_pred, y_pred_binarize, y_score) | ||
|
||
|
||
@pytest.mark.parametrize('name', METRICS_WITH_AVERAGING) | ||
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING)) | ||
def test_averaging_multilabel_all_zeroes(name): | ||
y_true = np.zeros((20, 3)) | ||
y_pred = np.zeros((20, 3)) | ||
|
@@ -1044,7 +1046,7 @@ def test_averaging_binary_multilabel_all_zeroes(): | |
y_pred_binarize, is_multilabel=True) | ||
|
||
|
||
@pytest.mark.parametrize('name', METRICS_WITH_AVERAGING) | ||
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING)) | ||
def test_averaging_multilabel_all_ones(name): | ||
y_true = np.ones((20, 3)) | ||
y_pred = np.ones((20, 3)) | ||
|
@@ -1136,9 +1138,10 @@ def check_sample_weight_invariance(name, metric, y1, y2): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', | ||
(set(ALL_METRICS).intersection(set(REGRESSION_METRICS)) | ||
- METRICS_WITHOUT_SAMPLE_WEIGHT)) | ||
'name', | ||
sorted( | ||
set(ALL_METRICS).intersection(set(REGRESSION_METRICS)) - | ||
METRICS_WITHOUT_SAMPLE_WEIGHT)) | ||
def test_regression_sample_weight_invariance(name): | ||
n_samples = 50 | ||
random_state = check_random_state(0) | ||
|
@@ -1150,9 +1153,10 @@ def test_regression_sample_weight_invariance(name): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', | ||
(set(ALL_METRICS) - set(REGRESSION_METRICS) | ||
- METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY)) | ||
'name', | ||
sorted( | ||
set(ALL_METRICS) - set(REGRESSION_METRICS) - | ||
METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY)) | ||
def test_binary_sample_weight_invariance(name): | ||
# binary | ||
n_samples = 50 | ||
|
@@ -1168,10 +1172,10 @@ def test_binary_sample_weight_invariance(name): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', | ||
(set(ALL_METRICS) - set(REGRESSION_METRICS) | ||
- METRICS_WITHOUT_SAMPLE_WEIGHT | ||
- METRIC_UNDEFINED_BINARY_MULTICLASS)) | ||
'name', | ||
sorted( | ||
set(ALL_METRICS) - set(REGRESSION_METRICS) - | ||
METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY_MULTICLASS)) | ||
def test_multiclass_sample_weight_invariance(name): | ||
# multiclass | ||
n_samples = 50 | ||
|
@@ -1187,9 +1191,9 @@ def test_multiclass_sample_weight_invariance(name): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', | ||
(MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS | | ||
MULTIOUTPUT_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT) | ||
'name', | ||
sorted((MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS | ||
| MULTIOUTPUT_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT)) | ||
def test_multilabel_sample_weight_invariance(name): | ||
# multilabel indicator | ||
random_state = check_random_state(0) | ||
|
@@ -1235,7 +1239,8 @@ def test_no_averaging_labels(): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', MULTILABELS_METRICS - {"unnormalized_multilabel_confusion_matrix"}) | ||
'name', | ||
sorted(MULTILABELS_METRICS - {"unnormalized_multilabel_confusion_matrix"})) | ||
def test_multilabel_label_permutations_invariance(name): | ||
random_state = check_random_state(0) | ||
n_samples, n_classes = 20, 4 | ||
|
@@ -1255,7 +1260,7 @@ def test_multilabel_label_permutations_invariance(name): | |
|
||
|
||
@pytest.mark.parametrize( | ||
'name', THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS) | ||
'name', sorted(THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS)) | ||
def test_thresholded_multilabel_multioutput_permutations_invariance(name): | ||
random_state = check_random_state(0) | ||
n_samples, n_classes = 20, 4 | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Maybe add a comment that this requires pytest-xdist. Otherwise pytest fails with:
`pytest: error: unrecognized arguments: -n`