test: add more tests for RequiredOptional #2045
Conversation
The branch was force-pushed from 01f5fe4 to c901ef8, and later from c901ef8 to ce40fde.
class TestRequiredOptional:
    def test_requiredoptional_empty(self):
        b = types.RequiredOptional()
        assert not b.required
        assert not b.optional
        assert not b.exclusive

    def test_requiredoptional_values_no_keywords(self):
        b = types.RequiredOptional(
            ("required1", "required2"),
            ("optional1", "optional2"),
            ("exclusive1", "exclusive2"),
        )
        assert b.required == ("required1", "required2")
        assert b.optional == ("optional1", "optional2")
        assert b.exclusive == ("exclusive1", "exclusive2")

    def test_requiredoptional_values_keywords(self):
        b = types.RequiredOptional(
            exclusive=("exclusive1", "exclusive2"),
            optional=("optional1", "optional2"),
            required=("required1", "required2"),
        )
        assert b.required == ("required1", "required2")
        assert b.optional == ("optional1", "optional2")
        assert b.exclusive == ("exclusive1", "exclusive2")

    def test_validate_attrs_required(self):
        data = {"required1": 1, "optional2": 2}
        rq = types.RequiredOptional(required=("required1",))
        rq.validate_attrs(data=data)
        data = {"optional1": 1, "optional2": 2}
        with pytest.raises(AttributeError, match="Missing attributes: required1"):
            rq.validate_attrs(data=data)

    def test_validate_attrs_exclusive(self):
        data = {"exclusive1": 1, "optional1": 1}
        rq = types.RequiredOptional(exclusive=("exclusive1", "exclusive2"))
        rq.validate_attrs(data=data)
        data = {"exclusive1": 1, "exclusive2": 2, "optional1": 1}
        with pytest.raises(
            AttributeError,
            match="Provide only one of these attributes: exclusive1, exclusive2",
        ):
            rq.validate_attrs(data=data)
Some time ago, when I was rewriting from unittest to pytest, I was trying to get rid of as many test classes as possible, and I would be super happy to see even fewer of them :D Would this work? Then we just give the tests more descriptive names and split them so each covers one type of assert, e.g. something like this:
def test_required_optional_empty():
    rq = types.RequiredOptional()
    assert not rq.required
    assert not rq.optional
    assert not rq.exclusive


def test_required_optional_values_no_keywords():
    rq = types.RequiredOptional(
        ("required1", "required2"),
        ("optional1", "optional2"),
        ("exclusive1", "exclusive2"),
    )
    assert rq.required == ("required1", "required2")
    assert rq.optional == ("optional1", "optional2")
    assert rq.exclusive == ("exclusive1", "exclusive2")


def test_required_optional_values_with_keywords():
    rq = types.RequiredOptional(
        exclusive=("exclusive1", "exclusive2"),
        optional=("optional1", "optional2"),
        required=("required1", "required2"),
    )
    assert rq.required == ("required1", "required2")
    assert rq.optional == ("optional1", "optional2")
    assert rq.exclusive == ("exclusive1", "exclusive2")


def test_validate_attrs_with_matching_required():
    data = {"required1": 1, "optional2": 2}
    rq = types.RequiredOptional(required=("required1",))
    rq.validate_attrs(data=data)


def test_validate_attrs_with_missing_required_raises():
    data = {"optional1": 1, "optional2": 2}
    rq = types.RequiredOptional(required=("required1",))
    with pytest.raises(AttributeError, match="Missing attributes: required1"):
        rq.validate_attrs(data=data)


def test_validate_attrs_with_matching_exclusive():
    data = {"exclusive1": 1, "optional1": 1}
    rq = types.RequiredOptional(exclusive=("exclusive1", "exclusive2"))
    rq.validate_attrs(data=data)


def test_validate_attrs_with_extra_exclusive_raises():
    data = {"exclusive1": 1, "exclusive2": 2, "optional1": 1}
    rq = types.RequiredOptional(exclusive=("exclusive1", "exclusive2"))
    with pytest.raises(
        AttributeError,
        match="Provide only one of these attributes: exclusive1, exclusive2",
    ):
        rq.validate_attrs(data=data)
@nejch Thanks for the review!
I will admit that I disagree with this. I prefer the test-class approach because it gathers the test cases for a data type (in this case) into one class. The benefit of this is that all tests for the data type are contained within that test class, and if the data type is moved to a different module in the future, it will be relatively easy to move the test class to a different test module.
The downside to me of not using test classes is that tests can then be added anywhere in the file, and tests that are completely unrelated to each other can end up interspersed throughout the file.
As far as more descriptive names go, that can be done inside the test class without issue.
But I would like to learn more about what benefit is gained by not using test classes, as I'm willing to have my opinions changed 😊
Thanks.
Hehe, not sure I want to have a philosophical debate on test classes vs modules here, so I'm fine to merge this as well, since we have a few others in other places 😄 But from my perspective with pytest, since it doesn't use unittest's setup/teardown, the only benefit of classes is namespacing, and for that I prefer modules; so if there were any moving, I'd just put them in a separate file, I think.
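To make the setup/teardown point concrete, here is a minimal sketch (not code from this PR; the fixture name is made up and the import path is assumed to match the tests above) of per-test setup done with a module-level pytest fixture instead of a class with a setUp method:

import pytest

from gitlab import types  # assumed import path, matching the tests in this PR


@pytest.fixture
def required_optional():
    # Runs before each test that requests it, much like unittest's setUp().
    return types.RequiredOptional(required=("required1",))


def test_validate_attrs_passes(required_optional):
    # No test class needed; the fixture provides the shared setup.
    required_optional.validate_attrs(data={"required1": 1})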
It's also how I see most pytest projects do it, so it's probably more natural for people who know pytest. We already tend to do a test module per resource, since tests are longer and trying to keep a 1:1 module.py <-> test_module.py ratio would sometimes result in really long test files.
IMO it would also be quite a task to switch our functions to methods properly now that our suite has grown:
(.venv) nejc@zbook-fury:~/repos/python-gitlab$ grep -r '^ def test_' tests/ | wc -l
36
(.venv) nejc@zbook-fury:~/repos/python-gitlab$ grep -r '^def test_' tests/ | wc -l
579
I'd still prefer to see one type of assert per test, to ensure they all run, as I suggested above, at least in unit tests (e.g. when testing a raise after asserting on success). But I think we also have quite a few of these in our tests, so I can collect those for a follow-up later.
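To spell out the "ensure they all run" reasoning with the names already used above (an illustrative sketch, not a proposed test): when the success path and the failure path share one test, an unexpected failure in the first call ends the test before the pytest.raises block is ever reached, so the second check is neither run nor reported.

def test_validate_attrs_required_combined():
    rq = types.RequiredOptional(required=("required1",))
    # If this call raises unexpectedly, the test stops here...
    rq.validate_attrs(data={"required1": 1, "optional2": 2})
    # ...and the failure-path check below never runs.
    with pytest.raises(AttributeError, match="Missing attributes: required1"):
        rq.validate_attrs(data={"optional1": 1, "optional2": 2})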
@nejch Thanks! I for sure am not suggesting we move everything to classes. Just sometimes I prefer classes.
But if we do a module per area of testing, how is the file naming typically done for the test module?
Say we have a file foo.py, and inside it there are two gigantic classes we want to test: HugeClass and GiantClass. How would you name the two test modules? Would there be a directory foo with two test files under it? Or two test files at the root test level, named test_foo_HugeClass.py and test_foo_GiantClass.py? Something else?
Thanks.
I don't have a hard requirement/guideline for that, but either works for me depending on what gets a reasonable size IMO (just with snake case everywhere) :)
For example, we have mixins.py, which is a single file, but we have a whole mixins/ test dir with multiple modules testing different aspects of it, and you know exactly what aspect is being tested right away, before even having to open the file (assuming they're clearly named).
For the client, we still use the opposite approach (a few root-level files), but in this particular case that means we get up to almost 1k lines and there's a bit of scrolling to get to a test case.
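For the hypothetical foo.py example above, either option might look roughly like this (directory and file names are purely illustrative, not an existing convention in this repo):

# Option 1: a test directory per source module
tests/unit/foo/test_huge_class.py    # tests for foo.HugeClass
tests/unit/foo/test_giant_class.py   # tests for foo.GiantClass

# Option 2: root-level test files with snake_case names
tests/unit/test_foo_huge_class.py
tests/unit/test_foo_giant_class.py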
@nejch Thanks!