Commit 9db95ee

[tests] migrate to unittest.assertEqual (#10670)
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
1 parent 4355abb commit 9db95ee
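
Background: assertEquals is a deprecated alias of unittest.TestCase.assertEqual (deprecated since Python 3.2 and removed in Python 3.12), so this commit switches the remaining call sites to the canonical name. A minimal sketch of the pattern, using a hypothetical test case purely for illustration:

    import unittest


    class ExampleTest(unittest.TestCase):
        def test_shape(self):
            values_shape = [1, 93680]
            # Deprecated alias: emits a DeprecationWarning on older Pythons
            # and raises AttributeError on Python 3.12+:
            #     self.assertEquals(values_shape, [1, 93680])
            # Canonical spelling used after this commit:
            self.assertEqual(values_shape, [1, 93680])


    if __name__ == "__main__":
        unittest.main()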

2 files changed: +8 −8 lines

2 files changed

+8
-8
lines changed

tests/transformers/speecht5/test_feature_extraction.py

Lines changed: 6 additions & 6 deletions
@@ -333,7 +333,7 @@ def test_attention_mask_target(self):
         feat_dict["return_attention_mask"] = True
         feat_extract = self.feature_extraction_class(**feat_dict)
         speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
-        input_lenghts = [len(x) for x in speech_inputs]
+        input_lengths = [len(x) for x in speech_inputs]
         input_name = feat_extract.model_input_names[0]

         processed = BatchFeature({input_name: speech_inputs})
@@ -343,18 +343,18 @@ def test_attention_mask_target(self):
         processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
         self.assertIn("attention_mask", processed)
         self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
-        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lenghts)
+        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

     def test_attention_mask_with_truncation_target(self):
         feat_dict = self.feat_extract_dict
         feat_dict["return_attention_mask"] = True
         feat_extract = self.feature_extraction_class(**feat_dict)
         speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
-        input_lenghts = [len(x) for x in speech_inputs]
+        input_lengths = [len(x) for x in speech_inputs]
         input_name = feat_extract.model_input_names[0]

         processed = BatchFeature({input_name: speech_inputs})
-        max_length = min(input_lenghts)
+        max_length = min(input_lengths)

         feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

@@ -393,7 +393,7 @@ def test_integration(self):
         input_speech = self._load_datasamples(1)
         feature_extractor = SpeechT5FeatureExtractor()
         input_values = feature_extractor(input_speech, return_tensors="pd").input_values
-        self.assertEquals(input_values.shape, [1, 93680])
+        self.assertEqual(input_values.shape, [1, 93680])
         self.assertTrue(paddle.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

     def test_integration_target(self):
@@ -409,5 +409,5 @@ def test_integration_target(self):
         input_speech = self._load_datasamples(1)
         feature_extractor = SpeechT5FeatureExtractor()
         input_values = feature_extractor(audio_target=input_speech, return_tensors="pd").input_values
-        self.assertEquals(input_values.shape, [1, 366, 80])
+        self.assertEqual(input_values.shape, [1, 366, 80])
         self.assertTrue(paddle.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
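
For context, the renamed input_lengths variable feeds the attention-mask check a few lines below the rename. A standalone sketch of that invariant using plain numpy (illustrative only; the real test goes through the SpeechT5 feature extractor and BatchFeature, and the lengths here are made up):

    import numpy as np

    # Fake variable-length "speech" inputs; lengths chosen arbitrarily.
    speech_inputs = [np.ones(n, dtype="float32") for n in (800, 1000, 1200)]
    input_lengths = [len(x) for x in speech_inputs]  # corrected spelling used by the commit

    # Pad everything to the longest example and build a matching attention mask.
    max_len = max(input_lengths)
    padded = np.stack([np.pad(x, (0, max_len - len(x))) for x in speech_inputs])
    attention_mask = np.stack(
        [np.pad(np.ones(n, dtype="int64"), (0, max_len - n)) for n in input_lengths]
    )

    # Same invariants the test asserts: the mask matches the padded batch shape,
    # and its row sums recover the original per-example lengths.
    assert list(attention_mask.shape) == list(padded.shape[:2])
    assert attention_mask.sum(-1).tolist() == input_lengths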

tests/transformers/yuan/test_tokenizer.py

Lines changed: 2 additions & 2 deletions
@@ -45,5 +45,5 @@ def test_extract_non_learnable_parts(self):
             context_data=context_data,
         )
         for idx, round in enumerate(conversation_result["conversations"]):
-            self.assertEquals(tokenizer.decode(round[0]), decode_outputs[idx][0])
-            self.assertEquals(tokenizer.decode(round[1]), decode_outputs[idx][1])
+            self.assertEqual(tokenizer.decode(round[0]), decode_outputs[idx][0])
+            self.assertEqual(tokenizer.decode(round[1]), decode_outputs[idx][1])
