Fixes
DinoV committed Nov 20, 2024
1 parent ed0feaa commit 7815db2
Showing 4 changed files with 61 additions and 63 deletions.
2 changes: 1 addition & 1 deletion Lib/test/test_free_threading/test_dict.py
@@ -152,7 +152,7 @@ class MyDict(dict): pass
             THREAD_COUNT = 10
 
             def writer_func(l):
-                for i in range(100000):
+                for i in range(1000):
                     if cyclic:
                         other_d = {}
                     d = MyDict({"foo": 100})
77 changes: 39 additions & 38 deletions Objects/dictobject.c
@@ -7113,7 +7113,8 @@ try_set_dict_inline_only_or_other_dict(PyObject *obj, PyObject *new_dict, PyDict
                             (PyDictObject *)Py_XNewRef(new_dict));
         replaced = true;
         goto exit_lock;
-    } else {
+    }
+    else {
         // We have inline values, we need to lock the dict and the object
         // at the same time to safely dematerialize them. To do that while releasing
         // the object lock we need a strong reference to the current dictionary.
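The comment in the hunk above is the heart of the change: when the dictionary holds the object's inline values, the dict and the object must be locked together, and the dict lock cannot simply be taken while the object lock is held because acquiring it can suspend the object lock. Below is a minimal standalone sketch of the resulting pattern — take a strong reference under the object lock, unlock, lock both, then re-validate. The object_t/dict_t types, replace_dict, and the plain pthreads locking are stand-ins for illustration, not the CPython API; the real Py_BEGIN_CRITICAL_SECTION2 also handles lock ordering and suspension, and reference counting of new_dict is elided here.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef struct dict_t {
        pthread_mutex_t mu;
        atomic_int refcnt;
    } dict_t;

    typedef struct object_t {
        pthread_mutex_t mu;
        dict_t *dict;
    } object_t;

    static void dict_decref(dict_t *d) {
        if (d != NULL) {
            atomic_fetch_sub(&d->refcnt, 1);   // freeing elided in this model
        }
    }

    // Swap obj->dict for new_dict even when the current dict must be locked too.
    static void replace_dict(object_t *obj, dict_t *new_dict)
    {
        for (;;) {
            pthread_mutex_lock(&obj->mu);
            dict_t *cur = obj->dict;
            if (cur == NULL) {
                obj->dict = new_dict;          // easy case: no dict to dematerialize
                pthread_mutex_unlock(&obj->mu);
                return;
            }
            atomic_fetch_add(&cur->refcnt, 1); // strong ref so cur outlives the unlock
            pthread_mutex_unlock(&obj->mu);    // can't take cur->mu while obj->mu is held

            pthread_mutex_lock(&cur->mu);      // now lock both
            pthread_mutex_lock(&obj->mu);
            bool still_current = (obj->dict == cur);
            if (still_current) {
                obj->dict = new_dict;          // safe: both locks held
            }
            pthread_mutex_unlock(&obj->mu);
            pthread_mutex_unlock(&cur->mu);
            dict_decref(cur);                  // drop the strong ref taken above
            if (still_current) {
                return;
            }
            // A racing thread replaced the dict between the unlock and the
            // relock; retry against the new current dictionary.
        }
    }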
@@ -7129,39 +7130,38 @@
 // and replaced it with another dictionary though.
 static int
 replace_dict_probably_inline_materialized(PyObject *obj, PyDictObject *inline_dict,
-                                          PyObject *new_dict, bool clear,
-                                          PyDictObject **replaced_dict)
+                                          PyDictObject *cur_dict, PyObject *new_dict)
 {
-    // But we could have had another thread race in after we released
-    // the object lock
-    int err = 0;
-    *replaced_dict = _PyObject_GetManagedDict(obj);
-    assert(FT_ATOMIC_LOAD_PTR_RELAXED(inline_dict->ma_values) == _PyObject_InlineValues(obj));
+    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(obj);
 
-    if (*replaced_dict == inline_dict) {
-        err = _PyDict_DetachFromObject(inline_dict, obj);
+    if (cur_dict == inline_dict) {
+        assert(FT_ATOMIC_LOAD_PTR_RELAXED(inline_dict->ma_values) == _PyObject_InlineValues(obj));
+
+        int err = _PyDict_DetachFromObject(inline_dict, obj);
         if (err != 0) {
             assert(new_dict == NULL);
             return err;
         }
-        // We incref'd the inline dict and the object owns a ref.
-        // Clear the object's reference, we'll clear the local
-        // reference after releasing the lock.
-        if (clear) {
-            Py_XDECREF((PyObject *)*replaced_dict);
-        } else {
-            _PyObject_XDecRefDelayed((PyObject *)*replaced_dict);
-        }
-        *replaced_dict = NULL;
     }
 
     FT_ATOMIC_STORE_PTR(_PyObject_ManagedDictPointer(obj)->dict,
-                            (PyDictObject *)Py_XNewRef(new_dict));
-    return err;
+                        (PyDictObject *)Py_XNewRef(new_dict));
+    return 0;
 }
 
 #endif
 
+static void
+decref_maybe_delay(PyObject *obj, bool delay)
+{
+    if (delay) {
+        _PyObject_XDecRefDelayed(obj);
+    }
+    else {
+        Py_XDECREF(obj);
+    }
+}
+
 static int
 set_or_clear_managed_dict(PyObject *obj, PyObject *new_dict, bool clear)
 {
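The new decref_maybe_delay helper centralizes a policy the old code repeated inline: decref immediately when the caller knows no concurrent readers remain (the clear path), defer otherwise, because readers on other threads may still be dereferencing borrowed pointers to the old dictionary. Here is a single-threaded model of the idea, with a plain to-free list standing in for CPython's delayed-reclamation machinery; note the model defers only the free, while the real _PyObject_XDecRefDelayed defers the whole decref. All names below (obj, deferred, drain_deferred, ...) are hypothetical.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>

    typedef struct obj {
        struct obj *next_to_free;
        int refcnt;
    } obj;

    // Objects whose refcount hit zero while other threads might still hold
    // borrowed pointers; drained once every thread has passed a safe point.
    // (Single-threaded model: a real list would need synchronization.)
    static obj *deferred = NULL;

    static void obj_free(obj *o) { free(o); }

    static void decref_now(obj *o)
    {
        if (o != NULL && --o->refcnt == 0) {
            obj_free(o);
        }
    }

    static void decref_delayed(obj *o)
    {
        if (o != NULL && --o->refcnt == 0) {
            o->next_to_free = deferred;   // park the object instead of freeing it
            deferred = o;
        }
    }

    // Same shape as the commit's decref_maybe_delay(): the caller states
    // whether concurrent borrowed readers are possible.
    static void decref_maybe_delay(obj *o, bool delay)
    {
        if (delay) {
            decref_delayed(o);
        }
        else {
            decref_now(o);
        }
    }

    // Called at a point where no thread can still be reading a parked object.
    static void drain_deferred(void)
    {
        while (deferred != NULL) {
            obj *o = deferred;
            deferred = o->next_to_free;
            obj_free(o);
        }
    }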
@@ -7177,32 +7177,37 @@ set_or_clear_managed_dict(PyObject *obj, PyObject *new_dict, bool clear)
         // values. We need to lock both the object and the dict at the
         // same time to safely replace it. We can't merely lock the dictionary
         // while the object is locked because it could suspend the object lock.
-        PyDictObject *replaced_dict;
+        PyDictObject *cur_dict;
 
         assert(prev_dict != NULL);
         Py_BEGIN_CRITICAL_SECTION2(obj, prev_dict);
 
-        err = replace_dict_probably_inline_materialized(obj, prev_dict, new_dict,
-                                                        clear, &replaced_dict);
+        // We could have had another thread race in between the call to
+        // try_set_dict_inline_only_or_other_dict where we locked the object
+        // and when we unlocked and re-locked the dictionary.
+        cur_dict = _PyObject_GetManagedDict(obj);
+
+        err = replace_dict_probably_inline_materialized(obj, prev_dict,
+                                                        cur_dict, new_dict);
 
         Py_END_CRITICAL_SECTION2();
 
-        Py_DECREF(prev_dict);
+        // Decref for the dictionary we incref'd in try_set_dict_inline_only_or_other_dict
+        // while the object was locked
+        decref_maybe_delay((PyObject *)prev_dict,
+                           !clear && prev_dict != cur_dict);
         if (err != 0) {
            return err;
         }
-        prev_dict = replaced_dict;
+
+        prev_dict = cur_dict;
     }
 
     if (prev_dict != NULL) {
-        // Readers from the old dictionary use a borrowed reference. We need
-        // to set the decref the dict at the next safe point.
-        if (clear) {
-            Py_XDECREF((PyObject *)prev_dict);
-        } else {
-            _PyObject_XDecRefDelayed((PyObject *)prev_dict);
-        }
+        // decref for the dictionary that we replaced
+        decref_maybe_delay((PyObject *)prev_dict, !clear);
     }
 
     return 0;
 #else
     PyDictObject *dict = _PyObject_GetManagedDict(obj);
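The delay condition in the loop above is dense; one plausible reading, spelled out (a restatement for clarity, not text from the commit):

    // delay = !clear && prev_dict != cur_dict
    //
    // clear                   -> decref now: the object is being torn down
    //                            and no new readers are expected.
    // prev_dict == cur_dict   -> decref now: no racing replacement happened,
    //                            so this strong reference is not what keeps
    //                            borrowed readers' dictionary alive.
    // otherwise               -> a racing thread already swapped prev_dict
    //                            out; readers may still hold borrowed
    //                            pointers to it, so the decref must wait for
    //                            the next safe point.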
@@ -7230,11 +7235,7 @@ set_or_clear_managed_dict(PyObject *obj, PyObject *new_dict, bool clear)
                             (PyDictObject *)Py_XNewRef(new_dict));
 
         Py_END_CRITICAL_SECTION();
-        if (clear) {
-            Py_XDECREF((PyObject *)dict);
-        } else {
-            _PyObject_XDecRefDelayed((PyObject *)dict);
-        }
+        decref_maybe_delay((PyObject *)dict, !clear);
     }
     assert(_PyObject_InlineValuesConsistencyCheck(obj));
     return err;
2 changes: 1 addition & 1 deletion Objects/obmalloc.c
@@ -1096,7 +1096,7 @@ static void
 free_work_item(uintptr_t ptr, delayed_dealloc_cb cb, void *state)
 {
     if (ptr & 0x01) {
-        PyObject *obj = (PyObject*)(char *)(ptr - 1);
+        PyObject *obj = (PyObject *)(ptr - 1);
 #ifdef Py_GIL_DISABLED
         if (cb == NULL) {
             assert(!_PyInterpreterState_GET()->stoptheworld.world_stopped);
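The ptr & 0x01 test relies on pointer tagging: aligned allocations always have a zero low bit, so work items set that bit to mark "this is a PyObject", and ptr - 1 (equivalently ptr & ~1) recovers the pointer. The cleanup here just drops a redundant intermediate (char *) cast. A self-contained sketch of the scheme — tag_object and untag are illustrative helpers, not the obmalloc API:

    #include <assert.h>
    #include <stdint.h>

    // Object pointers are tagged by setting bit 0, which alignment
    // guarantees is clear on any real allocation.
    static uintptr_t tag_object(void *object)
    {
        uintptr_t p = (uintptr_t)object;
        assert((p & 0x01) == 0);          // alignment leaves the low bit free
        return p | 0x01;
    }

    static void *untag(uintptr_t ptr, int *is_object)
    {
        *is_object = (int)(ptr & 0x01);
        return (void *)(ptr & ~(uintptr_t)0x01);   // same as ptr - 1 when tagged
    }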
43 changes: 20 additions & 23 deletions Python/gc_free_threading.c
@@ -393,6 +393,23 @@ gc_visit_thread_stacks(PyInterpreterState *interp)
     HEAD_UNLOCK(&_PyRuntime);
 }
 
+static void
+queue_untracked_obj_decref(PyObject *op, struct collection_state *state)
+{
+    if (!_PyObject_GC_IS_TRACKED(op)) {
+        // GC objects with zero refcount are handled subsequently by the
+        // GC as if they were cyclic trash, but we have to handle dead
+        // non-GC objects here. Add one to the refcount so that we can
+        // decref and deallocate the object once we start the world again.
+        op->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT);
+#ifdef Py_REF_DEBUG
+        _Py_IncRefTotal(_PyThreadState_GET());
+#endif
+        worklist_push(&state->objs_to_decref, op);
+    }
+
+}
+
 static void
 merge_queued_objects(_PyThreadStateImpl *tstate, struct collection_state *state)
 {
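The increment op->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT) in the new helper reads oddly until you know that the low bits of the shared refcount word are reserved for state flags, so the count proper lives in the upper bits and one reference equals one unit at the shift position. A standalone model — the shift value below is illustrative; CPython's headers define the real _Py_REF_SHARED_SHIFT:

    #include <stdint.h>

    // Model of a shared refcount word: the low SHIFT bits hold state flags,
    // the rest is the count. Adding (1 << SHIFT) bumps the count by one
    // without disturbing the flags.
    #define REF_SHARED_SHIFT 2                              // illustrative value
    #define REF_SHARED_FLAG_MASK ((1u << REF_SHARED_SHIFT) - 1)

    static uint32_t refcount_of(uint32_t ref_shared)
    {
        return ref_shared >> REF_SHARED_SHIFT;
    }

    static uint32_t add_one_ref(uint32_t ref_shared)
    {
        return ref_shared + (1u << REF_SHARED_SHIFT);       // flags unchanged
    }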
@@ -404,36 +421,16 @@ merge_queued_objects(_PyThreadStateImpl *tstate, struct collection_state *state)
         // Subtract one when merging because the queue had a reference.
         Py_ssize_t refcount = merge_refcount(op, -1);
 
-        if (!_PyObject_GC_IS_TRACKED(op) && refcount == 0) {
-            // GC objects with zero refcount are handled subsequently by the
-            // GC as if they were cyclic trash, but we have to handle dead
-            // non-GC objects here. Add one to the refcount so that we can
-            // decref and deallocate the object once we start the world again.
-            op->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT);
-#ifdef Py_REF_DEBUG
-            _Py_IncRefTotal(_PyThreadState_GET());
-#endif
-            worklist_push(&state->objs_to_decref, op);
+        if (refcount == 0) {
+            queue_untracked_obj_decref(op, state);
         }
     }
 }
 
 static void
 queue_freed_object(PyObject *obj, void *arg)
 {
-    struct collection_state *state = (struct collection_state *)arg;
-
-    // GC objects with zero refcount are handled subsequently by the
-    // GC as if they were cyclic trash, but we have to handle dead
-    // non-GC objects here. Add one to the refcount so that we can
-    // decref and deallocate the object once we start the world again.
-    if (!_PyObject_GC_IS_TRACKED(obj)) {
-        obj->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT);
-#ifdef Py_REF_DEBUG
-        _Py_IncRefTotal(_PyThreadState_GET());
-#endif
-        worklist_push(&state->objs_to_decref, obj);
-    }
+    queue_untracked_obj_decref(obj, arg);
 }
 
 static void
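merge_queued_objects depends on merge_refcount(op, -1) folding the per-owner-thread (local) count into the shared count and applying an extra adjustment, -1 here because the merge queue itself held a reference. A simplified model of that merge, assuming a two-field refcount and ignoring the atomics and bit packing the real implementation needs; biased_obj and merge_refcount_model are illustrative names:

    #include <stddef.h>

    typedef struct {
        size_t ref_local;    // references held by the owning thread
        size_t ref_shared;   // references from other threads (atomic in real code)
    } biased_obj;

    // Fold the owner's local count into the shared one and apply `extra`
    // (e.g. -1 to drop the reference the queue held). Returns the merged
    // total; 0 means the object is dead.
    static ptrdiff_t merge_refcount_model(biased_obj *op, ptrdiff_t extra)
    {
        ptrdiff_t total = (ptrdiff_t)op->ref_local
                        + (ptrdiff_t)op->ref_shared
                        + extra;
        op->ref_local = 0;               // no thread owns the object anymore
        op->ref_shared = (size_t)total;
        return total;
    }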
