bpo-36854: Move GC runtime state from _PyRuntimeState to PyInterpreterState. #13219

Closed
9 changes: 5 additions & 4 deletions Include/internal/pycore_object.h
@@ -19,7 +19,7 @@ PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content);
* NB: While the object is tracked by the collector, it must be safe to call the
* ob_traverse method.
*
* Internal note: _PyRuntime.gc.generation0->_gc_prev doesn't have any bit flags
* Internal note: PyInterpreterState.gc.generation0->_gc_prev doesn't have any bit flags
* because it's not object header. So we don't use _PyGCHead_PREV() and
* _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
*
@@ -38,11 +38,12 @@ static inline void _PyObject_GC_TRACK_impl(const char *filename, int lineno,
"object is in generation which is garbage collected",
filename, lineno, "_PyObject_GC_TRACK");

PyGC_Head *last = (PyGC_Head*)(_PyRuntime.gc.generation0->_gc_prev);
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
Reviewer comment (project member):
Would it be possible to use a "struct _gc_runtime_state *state" variable instead?

struct _gc_runtime_state *state = &_PyInterpreterState_GET_UNSAFE()->gc;

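A minimal sketch of what that suggestion might look like applied to this function; the variable name and placement follow the reviewer's snippet plus assumptions, and are not code from this PR:

    /* Hypothetical: cache the per-interpreter GC state once instead of
       spelling out interp->gc on every access.  'gc' is the object's
       PyGC_Head, initialized earlier in the function. */
    struct _gc_runtime_state *state = &_PyInterpreterState_GET_UNSAFE()->gc;

    PyGC_Head *last = (PyGC_Head*)(state->generation0->_gc_prev);
    _PyGCHead_SET_NEXT(last, gc);
    _PyGCHead_SET_PREV(gc, last);
    _PyGCHead_SET_NEXT(gc, state->generation0);
    state->generation0->_gc_prev = (uintptr_t)gc;
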
PyGC_Head *last = (PyGC_Head*)(interp->gc.generation0->_gc_prev);
_PyGCHead_SET_NEXT(last, gc);
_PyGCHead_SET_PREV(gc, last);
_PyGCHead_SET_NEXT(gc, _PyRuntime.gc.generation0);
_PyRuntime.gc.generation0->_gc_prev = (uintptr_t)gc;
_PyGCHead_SET_NEXT(gc, interp->gc.generation0);
interp->gc.generation0->_gc_prev = (uintptr_t)gc;
}

#define _PyObject_GC_TRACK(op) \
4 changes: 2 additions & 2 deletions Include/internal/pycore_pylifecycle.h
@@ -73,7 +73,7 @@ extern void PyAsyncGen_Fini(void);
extern void _PyExc_Fini(void);
extern void _PyImport_Fini(void);
extern void _PyImport_Fini2(void);
extern void _PyGC_Fini(_PyRuntimeState *runtime);
extern void _PyGC_Fini(PyInterpreterState *interp);
extern void _PyType_Fini(void);
extern void _Py_HashRandomization_Fini(void);
extern void _PyUnicode_Fini(void);
@@ -89,7 +89,7 @@ extern void _PyGILState_Init(
PyThreadState *tstate);
extern void _PyGILState_Fini(_PyRuntimeState *runtime);

PyAPI_FUNC(void) _PyGC_DumpShutdownStats(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyGC_DumpShutdownStats(PyInterpreterState *interp);

PyAPI_FUNC(_PyInitError) _Py_PreInitializeFromPyArgv(
const _PyPreConfig *src_config,
2 changes: 1 addition & 1 deletion Include/internal/pycore_pystate.h
@@ -92,6 +92,7 @@ struct _is {
uint64_t tstate_next_unique_id;

struct _warnings_runtime_state warnings;
struct _gc_runtime_state gc;
};

PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(PY_INT64_T);
@@ -180,7 +181,6 @@ typedef struct pyruntimestate {
void (*exitfuncs[NEXITFUNCS])(void);
int nexitfuncs;

struct _gc_runtime_state gc;
struct _ceval_runtime_state ceval;
struct _gilstate_runtime_state gilstate;

@@ -0,0 +1,8 @@
Move GC runtime state from _PyRuntimeState to PyInterpreterState. As part
of this, Python threads (created by the threading module) are marked as
"deleted" earlier in finalization. That marker is how joining threads get
unblocked. It was happening in `PyThreadState_Delete()` but now it will
happen in `PyThreadState_Clear()`. This is necessary to ensure that the
callback gets called *before* much interpreter/runtime finalization happens.
(This change could impact daemon threads, but we already can't guarantee
behavior for those once finalization starts.)
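A conceptual sketch of the finalization change described above: the thread state's `on_delete` callback is how a joining thread gets unblocked in this era of CPython, but treat the exact names and placement as assumptions, since pystate.c is not shown in this excerpt:

    /* Sketch: the threading module registers a callback on the thread state
       that releases the lock Thread.join() is waiting on.  Before this PR it
       ran from PyThreadState_Delete(); after it, it runs from
       PyThreadState_Clear(), i.e. before most interpreter/runtime
       finalization. */
    void
    PyThreadState_Clear(PyThreadState *tstate)
    {
        /* ... clear the thread state's owned objects ... */
        if (tstate->on_delete != NULL) {
            tstate->on_delete(tstate->on_delete_data);  /* unblocks join() */
        }
    }
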
68 changes: 44 additions & 24 deletions Modules/gcmodule.c
@@ -1257,7 +1257,8 @@ static PyObject *
gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
_PyRuntime.gc.enabled = 1;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
Reviewer comment (project member):
Would it be possible to use a "struct _gc_runtime_state *state" variable instead?

struct _gc_runtime_state *state = &_PyInterpreterState_GET_UNSAFE()->gc;

If the line is too long, maybe add a GET_STATE() macro which returns &_PyInterpreterState_GET_UNSAFE()->gc.

Same comment for the whole file. I would prefer to avoid introducing "PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE(); " lines if it's only used to get the GC state.

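One possible shape of the GET_STATE() helper mentioned above; the macro name comes from the comment, but its definition and placement are assumptions, since this PR does not add it:

    /* Hypothetical convenience macro for gcmodule.c: fetch the current
       interpreter's GC state in one expression. */
    #define GET_STATE() (&_PyInterpreterState_GET_UNSAFE()->gc)

    static PyObject *
    gc_enable_impl(PyObject *module)
    {
        GET_STATE()->enabled = 1;
        Py_RETURN_NONE;
    }
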
interp->gc.enabled = 1;
Py_RETURN_NONE;
}

@@ -1271,7 +1272,8 @@ static PyObject *
gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
_PyRuntime.gc.enabled = 0;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
interp->gc.enabled = 0;
Py_RETURN_NONE;
}

@@ -1285,7 +1287,8 @@ static int
gc_isenabled_impl(PyObject *module)
/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
{
return _PyRuntime.gc.enabled;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
return interp->gc.enabled;
}

/*[clinic input]
@@ -1312,7 +1315,8 @@ gc_collect_impl(PyObject *module, int generation)
return -1;
}

struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
Py_ssize_t n;
if (state->collecting) {
/* already collecting, don't do anything */
@@ -1348,7 +1352,8 @@ static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
_PyRuntime.gc.debug = flags;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
interp->gc.debug = flags;

Py_RETURN_NONE;
}
@@ -1363,7 +1368,8 @@ static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
return _PyRuntime.gc.debug;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
return interp->gc.debug;
}

PyDoc_STRVAR(gc_set_thresh__doc__,
@@ -1375,7 +1381,8 @@ PyDoc_STRVAR(gc_set_thresh__doc__,
static PyObject *
gc_set_threshold(PyObject *self, PyObject *args)
{
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
&state->generations[0].threshold,
&state->generations[1].threshold,
@@ -1398,7 +1405,8 @@ static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
return Py_BuildValue("(iii)",
state->generations[0].threshold,
state->generations[1].threshold,
@@ -1415,7 +1423,8 @@ static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
return Py_BuildValue("(iii)",
state->generations[0].count,
state->generations[1].count,
@@ -1462,7 +1471,8 @@ gc_get_referrers(PyObject *self, PyObject *args)
PyObject *result = PyList_New(0);
if (!result) return NULL;

struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
for (i = 0; i < NUM_GENERATIONS; i++) {
if (!(gc_referrers_for(args, GEN_HEAD(state, i), result))) {
Py_DECREF(result);
@@ -1526,7 +1536,8 @@ gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
{
int i;
PyObject* result;
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;

result = PyList_New(0);
if (result == NULL) {
@@ -1584,7 +1595,8 @@ gc_get_stats_impl(PyObject *module)

/* To get consistent values despite allocations while constructing
the result list, we use a snapshot of the running stats. */
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
for (i = 0; i < NUM_GENERATIONS; i++) {
stats[i] = state->generation_stats[i];
}
@@ -1656,7 +1668,8 @@ static PyObject *
gc_freeze_impl(PyObject *module)
/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
{
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
for (int i = 0; i < NUM_GENERATIONS; ++i) {
gc_list_merge(GEN_HEAD(state, i), &state->permanent_generation.head);
state->generations[i].count = 0;
@@ -1676,7 +1689,8 @@ static PyObject *
gc_unfreeze_impl(PyObject *module)
/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
{
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
gc_list_merge(&state->permanent_generation.head, GEN_HEAD(state, NUM_GENERATIONS-1));
Py_RETURN_NONE;
}
@@ -1691,7 +1705,8 @@ static Py_ssize_t
gc_get_freeze_count_impl(PyObject *module)
/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
{
return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
return gc_list_size(&interp->gc.permanent_generation.head);
}


@@ -1762,7 +1777,8 @@ PyInit_gc(void)
return NULL;
}

struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
if (state->garbage == NULL) {
state->garbage = PyList_New(0);
if (state->garbage == NULL)
@@ -1795,7 +1811,8 @@
Py_ssize_t
PyGC_Collect(void)
{
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
if (!state->enabled) {
return 0;
}
@@ -1828,7 +1845,8 @@ _PyGC_CollectNoFail(void)
{
assert(!PyErr_Occurred());

struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
Py_ssize_t n;

/* Ideally, this function is only called on interpreter shutdown,
@@ -1849,9 +1867,9 @@
}

void
_PyGC_DumpShutdownStats(_PyRuntimeState *runtime)
_PyGC_DumpShutdownStats(PyInterpreterState *interp)
{
struct _gc_runtime_state *state = &runtime->gc;
struct _gc_runtime_state *state = &interp->gc;
if (!(state->debug & DEBUG_SAVEALL)
&& state->garbage != NULL && PyList_GET_SIZE(state->garbage) > 0) {
const char *message;
@@ -1886,9 +1904,9 @@ _PyGC_DumpShutdownStats(_PyRuntimeState *runtime)
}

void
_PyGC_Fini(_PyRuntimeState *runtime)
_PyGC_Fini(PyInterpreterState *interp)
{
struct _gc_runtime_state *state = &runtime->gc;
struct _gc_runtime_state *state = &interp->gc;
Py_CLEAR(state->garbage);
Py_CLEAR(state->callbacks);
}
@@ -1930,7 +1948,8 @@ PyObject_GC_UnTrack(void *op_raw)
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
PyObject *op;
PyGC_Head *g;
size_t size;
@@ -2023,7 +2042,8 @@ PyObject_GC_Del(void *op)
if (_PyObject_GC_IS_TRACKED(op)) {
gc_list_remove(g);
}
struct _gc_runtime_state *state = &_PyRuntime.gc;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
struct _gc_runtime_state *state = &interp->gc;
if (state->generations[0].count > 0) {
state->generations[0].count--;
}
16 changes: 9 additions & 7 deletions Objects/object.c
@@ -2052,8 +2052,9 @@ _PyTrash_deposit_object(PyObject *op)
_PyObject_ASSERT(op, PyObject_IS_GC(op));
_PyObject_ASSERT(op, !_PyObject_GC_IS_TRACKED(op));
_PyObject_ASSERT(op, op->ob_refcnt == 0);
_PyGCHead_SET_PREV(_Py_AS_GC(op), _PyRuntime.gc.trash_delete_later);
_PyRuntime.gc.trash_delete_later = op;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
_PyGCHead_SET_PREV(_Py_AS_GC(op), interp->gc.trash_delete_later);
interp->gc.trash_delete_later = op;
}

/* The equivalent API, using per-thread state recursion info */
@@ -2074,11 +2075,12 @@ _PyTrash_thread_deposit_object(PyObject *op)
void
_PyTrash_destroy_chain(void)
{
while (_PyRuntime.gc.trash_delete_later) {
PyObject *op = _PyRuntime.gc.trash_delete_later;
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
Reviewer comment (project member):
Again, a "state" variable would be more appropriate here.

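A hedged sketch of the same pattern applied to this loop; the `state` variable is the reviewer's suggestion, not what the PR itself does:

    /* Hypothetical: fetch the GC state once, then walk the trash list. */
    struct _gc_runtime_state *state = &_PyInterpreterState_GET_UNSAFE()->gc;
    while (state->trash_delete_later) {
        PyObject *op = state->trash_delete_later;
        destructor dealloc = Py_TYPE(op)->tp_dealloc;

        state->trash_delete_later = (PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));

        /* Call the deallocator directly, tracking the nesting depth. */
        _PyObject_ASSERT(op, op->ob_refcnt == 0);
        ++state->trash_delete_nesting;
        (*dealloc)(op);
        --state->trash_delete_nesting;
    }
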
while (interp->gc.trash_delete_later) {
PyObject *op = interp->gc.trash_delete_later;
destructor dealloc = Py_TYPE(op)->tp_dealloc;

_PyRuntime.gc.trash_delete_later =
interp->gc.trash_delete_later =
(PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));

/* Call the deallocator directly. This used to try to
Expand All @@ -2088,9 +2090,9 @@ _PyTrash_destroy_chain(void)
* up distorting allocation statistics.
*/
_PyObject_ASSERT(op, op->ob_refcnt == 0);
++_PyRuntime.gc.trash_delete_nesting;
++interp->gc.trash_delete_nesting;
(*dealloc)(op);
--_PyRuntime.gc.trash_delete_nesting;
--interp->gc.trash_delete_nesting;
}
}

2 changes: 1 addition & 1 deletion Python/import.c
@@ -535,7 +535,7 @@ PyImport_Cleanup(void)
_PyGC_CollectNoFail();
/* Dump GC stats before it's too late, since it uses the warnings
machinery. */
_PyGC_DumpShutdownStats(&_PyRuntime);
_PyGC_DumpShutdownStats(interp);

/* Now, if there are any modules left alive, clear their globals to
minimize potential leaks. All C extension modules actually end