Pytest Testing for Python
Quick Reference
Test Discovery
Pytest auto-discovers tests matching:
- Files: test_*.py or *_test.py
- Functions: test_* prefix
- Classes: Test* prefix (with no __init__ method)
Running Tests
pytest # Run all tests
pytest test_mod.py # Single module
pytest tests/ # Directory
pytest -k "name" # By keyword
pytest -m slow # By marker
pytest test_mod.py::test_func # Specific test
pytest --durations=10 # Show slowest tests
Fixtures
Fixtures provide reusable test dependencies.
Basic Fixture
import pytest
@pytest.fixture
def sample_data():
return {"key": "value"}
def test_example(sample_data):
assert sample_data["key"] == "value"
Fixture Scopes
@pytest.fixture(scope="function") # Default: per test
@pytest.fixture(scope="class") # Per test class
@pytest.fixture(scope="module") # Per module
@pytest.fixture(scope="session") # Entire session
Fixture with Teardown (yield)
@pytest.fixture
def db_connection():
conn = create_connection()
yield conn
conn.close() # Cleanup after test
Factory Fixture
@pytest.fixture
def make_user():
def _make_user(name, role="user"):
return {"name": name, "role": role}
return _make_user
def test_users(make_user):
admin = make_user("Alice", role="admin")
user = make_user("Bob")
Parametrized Fixture
@pytest.fixture(params=["mysql", "postgres", "sqlite"])
def database(request):
return create_db(request.param)
Parametrization
Run tests with multiple inputs.
@pytest.mark.parametrize("input,expected", [
(1, 2),
(2, 4),
(3, 6),
])
def test_double(input, expected):
assert input * 2 == expected
Multiple Parameters (Combinations)
@pytest.mark.parametrize("x", [1, 2])
@pytest.mark.parametrize("y", [10, 20])
def test_multiply(x, y): # Runs 4 combinations
assert x * y > 0
With Expected Failures
@pytest.mark.parametrize("input,expected", [
(1, 1),
pytest.param(0, 1, marks=pytest.mark.xfail),
])
def test_factorial(input, expected):
assert factorial(input) == expected
Markers
Built-in Markers
@pytest.mark.skip(reason="Not implemented")
def test_feature(): ...
@pytest.mark.skipif(sys.platform == "win32", reason="Unix only")
def test_unix(): ...
@pytest.mark.xfail(reason="Known bug")
def test_buggy(): ...
Custom Markers
Register in pytest.ini or pyproject.toml:
[pytest]
markers =
slow: marks tests as slow
integration: integration tests
@pytest.mark.slow
def test_slow_operation(): ...
Run: pytest -m slow or pytest -m "not slow"
Assertions
Basic Assertions
assert value == expected
assert value != other
assert value is None
assert value is not None
assert value in collection
assert isinstance(obj, MyClass)
Floating Point
assert 0.1 + 0.2 == pytest.approx(0.3)
assert result == pytest.approx(expected, rel=1e-3)
Exception Testing
def test_raises():
with pytest.raises(ValueError):
int("invalid")
def test_raises_with_match():
with pytest.raises(ValueError, match=r"invalid.*"):
raise ValueError("invalid input")
def test_raises_inspect():
with pytest.raises(ValueError) as exc_info:
raise ValueError("test error")
assert "test" in str(exc_info.value)
Monkeypatch (Mocking)
Patching Functions
def test_api_call(monkeypatch):
def mock_get(*args, **kwargs):
return {"status": "ok"}
monkeypatch.setattr("mymodule.api.get", mock_get)
result = mymodule.fetch_data()
assert result["status"] == "ok"
Environment Variables
def test_with_env(monkeypatch):
monkeypatch.setenv("API_KEY", "test-key")
assert os.environ["API_KEY"] == "test-key"
def test_without_env(monkeypatch):
monkeypatch.delenv("API_KEY", raising=False)
Dictionary Values
def test_config(monkeypatch):
monkeypatch.setitem(app.config, "DEBUG", True)
Built-in Fixtures
| Fixture | Purpose |
|---------|---------|
| tmp_path | Temporary directory (pathlib.Path) |
| tmp_path_factory | Session-scoped temp directories |
| capsys | Capture stdout/stderr |
| caplog | Capture log messages |
| monkeypatch | Dynamic patching |
| request | Fixture/test metadata |
Examples
def test_output(capsys):
print("hello")
captured = capsys.readouterr()
assert captured.out == "hello\n"
def test_logging(caplog):
import logging
logging.warning("test warning")
assert "test warning" in caplog.text
def test_temp_file(tmp_path):
file = tmp_path / "test.txt"
file.write_text("content")
assert file.read_text() == "content"
Project Structure
Recommended layout:
project/
├── pyproject.toml
├── src/
│ └── mypackage/
│ ├── __init__.py
│ └── module.py
└── tests/
├── conftest.py # Shared fixtures
├── test_module.py
└── unit/
└── test_specific.py
conftest.py
Shared fixtures available to all tests in directory:
# tests/conftest.py
import pytest
@pytest.fixture
def app():
return create_app(testing=True)
@pytest.fixture
def client(app):
return app.test_client()
Configuration
pyproject.toml
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_functions = ["test_*"]
addopts = "-v --strict-markers"
markers = [
"slow: marks tests as slow",
"integration: integration tests",
]
filterwarnings = [
"ignore::DeprecationWarning",
]
Best Practices
- One assertion focus per test - test one behavior per function
- Descriptive names - e.g. test_user_creation_with_invalid_email_raises_error
- Use fixtures - avoid setup duplication
- Isolate tests - no shared state between tests
- Fast unit tests - mark slow tests with @pytest.mark.slow
- Parametrize - prefer parametrize over copy-pasted tests
- Test edge cases - empty inputs, boundaries, errors
References
- Fixtures Guide - Advanced fixture patterns
- Patterns Guide - Common testing patterns