Skip to content

Commit d93cd3b

Browse files
authored
remove artificially duplicated test [ci skip]
1 parent e680efc commit d93cd3b

File tree

1 file changed

+0
-48
lines changed

1 file changed

+0
-48
lines changed

spacy/tests/pipeline/test_textcat.py

Lines changed: 0 additions & 48 deletions

Original file line number | Diff line number | Diff line change
@@ -226,54 +226,6 @@ def test_overfitting_IO():
226226
assert_equal(batch_cats_1, no_batch_cats)
227227

228228

229-
@pytest.mark.skip(reason="TODO: Can this be removed?")
230-
def test_overfitting_IO_multi_old():
231-
# Simple test to try and quickly overfit the multi-label textcat component - ensuring the ML models work correctly
232-
fix_random_seed(0)
233-
nlp = English()
234-
# Set exclusive labels to False
235-
config = {"model": {"linear_model": {"exclusive_classes": False}}}
236-
textcat = nlp.add_pipe("textcat", config=config)
237-
train_examples = []
238-
for text, annotations in TRAIN_DATA_MULTI_LABEL:
239-
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
240-
optimizer = nlp.initialize(get_examples=lambda: train_examples)
241-
assert textcat.model.get_dim("nO") == 2
242-
243-
for i in range(50):
244-
losses = {}
245-
nlp.update(train_examples, sgd=optimizer, losses=losses)
246-
assert losses["textcat"] < 0.01
247-
248-
# test the trained model
249-
test_text = "I am happy."
250-
doc = nlp(test_text)
251-
cats = doc.cats
252-
assert cats["POSITIVE"] > 0.9
253-
254-
# Also test the results are still the same after IO
255-
with make_tempdir() as tmp_dir:
256-
nlp.to_disk(tmp_dir)
257-
nlp2 = util.load_model_from_path(tmp_dir)
258-
doc2 = nlp2(test_text)
259-
cats2 = doc2.cats
260-
assert cats2["POSITIVE"] > 0.9
261-
262-
# Test scoring
263-
scores = nlp.evaluate(train_examples)
264-
assert scores["cats_micro_f"] == 1.0
265-
assert scores["cats_score"] == 1.0
266-
assert "cats_score_desc" in scores
267-
268-
# Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
269-
texts = ["Just a sentence.", "I like green eggs.", "I am happy.", "I eat ham."]
270-
batch_cats_1 = [doc.cats for doc in nlp.pipe(texts)]
271-
batch_cats_2 = [doc.cats for doc in nlp.pipe(texts)]
272-
no_batch_cats = [doc.cats for doc in [nlp(text) for text in texts]]
273-
assert_equal(batch_cats_1, batch_cats_2)
274-
assert_equal(batch_cats_1, no_batch_cats)
275-
276-
277229
def test_overfitting_IO_multi():
278230
# Simple test to try and quickly overfit the multi-label textcat component - ensuring the ML models work correctly
279231
fix_random_seed(0)

0 commit comments

Comments (0)