Diffstat (limited to 'Python')
-rw-r--r--   Python/ceval.c       | 200
-rw-r--r--   Python/ceval_gil.h   |   8
-rw-r--r--   Python/pylifecycle.c |  24
-rw-r--r--   Python/pystate.c     | 150
4 files changed, 151 insertions(+), 231 deletions(-)
diff --git a/Python/ceval.c b/Python/ceval.c
index be75ade909..b311248c6a 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -96,61 +96,61 @@ static long dxp[256];
 /* This can set eval_breaker to 0 even though gil_drop_request became
    1.  We believe this is all right because the eval loop will release
    the GIL eventually anyway. */
-#define COMPUTE_EVAL_BREAKER(interp) \
+#define COMPUTE_EVAL_BREAKER() \
     _Py_atomic_store_relaxed( \
-        &interp->ceval.eval_breaker, \
+        &_PyRuntime.ceval.eval_breaker, \
         GIL_REQUEST | \
         _Py_atomic_load_relaxed(&_PyRuntime.ceval.signals_pending) | \
-        _Py_atomic_load_relaxed(&interp->ceval.pending.calls_to_do) | \
-        interp->ceval.pending.async_exc)
+        _Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
+        _PyRuntime.ceval.pending.async_exc)

-#define SET_GIL_DROP_REQUEST(interp) \
+#define SET_GIL_DROP_REQUEST() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
-        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
     } while (0)

-#define RESET_GIL_DROP_REQUEST(interp) \
+#define RESET_GIL_DROP_REQUEST() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 0); \
-        COMPUTE_EVAL_BREAKER(interp); \
+        COMPUTE_EVAL_BREAKER(); \
     } while (0)

 /* Pending calls are only modified under pending_lock */
-#define SIGNAL_PENDING_CALLS(interp) \
+#define SIGNAL_PENDING_CALLS() \
     do { \
-        _Py_atomic_store_relaxed(&interp->ceval.pending.calls_to_do, 1); \
-        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
+        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
     } while (0)

-#define UNSIGNAL_PENDING_CALLS(interp) \
+#define UNSIGNAL_PENDING_CALLS() \
     do { \
-        _Py_atomic_store_relaxed(&interp->ceval.pending.calls_to_do, 0); \
-        COMPUTE_EVAL_BREAKER(interp); \
+        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
+        COMPUTE_EVAL_BREAKER(); \
     } while (0)

 #define SIGNAL_PENDING_SIGNALS() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.signals_pending, 1); \
-        _Py_atomic_store_relaxed(&_PyRuntime.interpreters.main->ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
     } while (0)

 #define UNSIGNAL_PENDING_SIGNALS() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.signals_pending, 0); \
-        COMPUTE_EVAL_BREAKER(_PyRuntime.interpreters.main); \
+        COMPUTE_EVAL_BREAKER(); \
     } while (0)

-#define SIGNAL_ASYNC_EXC(interp) \
+#define SIGNAL_ASYNC_EXC() \
     do { \
-        interp->ceval.pending.async_exc = 1; \
-        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
+        _PyRuntime.ceval.pending.async_exc = 1; \
+        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
     } while (0)

-#define UNSIGNAL_ASYNC_EXC(interp) \
+#define UNSIGNAL_ASYNC_EXC() \
     do { \
-        interp->ceval.pending.async_exc = 0; \
-        COMPUTE_EVAL_BREAKER(interp); \
+        _PyRuntime.ceval.pending.async_exc = 0; \
+        COMPUTE_EVAL_BREAKER(); \
     } while (0)

@@ -174,6 +174,9 @@ PyEval_InitThreads(void)
     PyThread_init_thread();
     create_gil();
     take_gil(_PyThreadState_GET());
+    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
+    if (!_PyRuntime.ceval.pending.lock)
+        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
 }

 void
@@ -240,11 +243,9 @@ PyEval_ReInitThreads(void)
     if (!gil_created())
         return;
     recreate_gil();
-    // This will be reset in make_pending_calls() below.
-    current_tstate->interp->ceval.pending.lock = NULL;
-
+    _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
     take_gil(current_tstate);
-    _PyRuntime.main_thread = PyThread_get_thread_ident();
+    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();

     /* Destroy all threads except the current one */
     _PyThreadState_DeleteExcept(current_tstate);
@@ -254,9 +255,9 @@ PyEval_ReInitThreads(void)
    raised. */

 void
-_PyEval_SignalAsyncExc(PyInterpreterState *interp)
+_PyEval_SignalAsyncExc(void)
 {
-    SIGNAL_ASYNC_EXC(interp);
+    SIGNAL_ASYNC_EXC();
 }

 PyThreadState *
@@ -322,58 +323,17 @@ _PyEval_SignalReceived(void)
     SIGNAL_PENDING_SIGNALS();
 }

-static int
-_add_pending_call(PyInterpreterState *interp, unsigned long thread_id, int (*func)(void *), void *arg)
-{
-    int i = interp->ceval.pending.last;
-    int j = (i + 1) % NPENDINGCALLS;
-    if (j == interp->ceval.pending.first) {
-        return -1; /* Queue full */
-    }
-    interp->ceval.pending.calls[i].thread_id = thread_id;
-    interp->ceval.pending.calls[i].func = func;
-    interp->ceval.pending.calls[i].arg = arg;
-    interp->ceval.pending.last = j;
-    return 0;
-}
-
-/* pop one item off the queue while holding the lock */
-static void
-_pop_pending_call(PyInterpreterState *interp, int (**func)(void *), void **arg)
-{
-    int i = interp->ceval.pending.first;
-    if (i == interp->ceval.pending.last) {
-        return; /* Queue empty */
-    }
-
-    *func = interp->ceval.pending.calls[i].func;
-    *arg = interp->ceval.pending.calls[i].arg;
-    interp->ceval.pending.first = (i + 1) % NPENDINGCALLS;
-
-    unsigned long thread_id = interp->ceval.pending.calls[i].thread_id;
-    if (thread_id && PyThread_get_thread_ident() != thread_id) {
-        // Thread mismatch, so move it to the end of the list
-        // and start over.
-        _Py_AddPendingCall(interp, thread_id, *func, *arg);
-        return;
-    }
-}
-
-int
-Py_AddPendingCall(int (*func)(void *), void *arg)
-{
-    PyInterpreterState *interp = _PyRuntime.interpreters.main;
-    return _Py_AddPendingCall(interp, _PyRuntime.main_thread, func, arg);
-}
-
 /* This implementation is thread-safe.  It allows
    scheduling to be made from any thread, and even from an executing
    callback.
  */

 int
-_Py_AddPendingCall(PyInterpreterState *interp, unsigned long thread_id, int (*func)(void *), void *arg)
+Py_AddPendingCall(int (*func)(void *), void *arg)
 {
+    int i, j, result=0;
+    PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;
+
     /* try a few times for the lock.  Since this mechanism is used
      * for signal handling (on the main thread), there is a (slim)
      * chance that a signal is delivered on the same thread while we
@@ -385,9 +345,7 @@ _Py_AddPendingCall(PyInterpreterState *interp, unsigned long thread_id, int (*fu
      * We also check for lock being NULL, in the unlikely case that
      * this function is called before any bytecode evaluation takes place.
      */
-    PyThread_type_lock lock = interp->ceval.pending.lock;
     if (lock != NULL) {
-        int i;
         for (i = 0; i<100; i++) {
             if (PyThread_acquire_lock(lock, NOWAIT_LOCK))
                 break;
@@ -396,21 +354,17 @@ _Py_AddPendingCall(PyInterpreterState *interp, unsigned long thread_id, int (*fu
             return -1;
     }

-    int result = -1;
-    if (interp->finalizing) {
-        PyObject *exc, *val, *tb;
-        PyErr_Fetch(&exc, &val, &tb);
-        PyErr_SetString(PyExc_SystemError, "Py_AddPendingCall: cannot add pending calls (interpreter shutting down)");
-        PyErr_Print();
-        PyErr_Restore(exc, val, tb);
-        goto done;
+    i = _PyRuntime.ceval.pending.last;
+    j = (i + 1) % NPENDINGCALLS;
+    if (j == _PyRuntime.ceval.pending.first) {
+        result = -1; /* Queue full */
+    } else {
+        _PyRuntime.ceval.pending.calls[i].func = func;
+        _PyRuntime.ceval.pending.calls[i].arg = arg;
+        _PyRuntime.ceval.pending.last = j;
     }
-
-    result = _add_pending_call(interp, thread_id, func, arg);
     /* signal main loop */
-    SIGNAL_PENDING_CALLS(interp);
-
-done:
+    SIGNAL_PENDING_CALLS();
     if (lock != NULL)
         PyThread_release_lock(lock);
     return result;
@@ -420,7 +374,9 @@ static int
 handle_signals(void)
 {
     /* Only handle signals on main thread. */
-    if (PyThread_get_thread_ident() != _PyRuntime.main_thread) {
+    if (_PyRuntime.ceval.pending.main_thread &&
+        PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
+    {
         return 0;
     }
     /*
@@ -440,10 +396,17 @@ handle_signals(void)
 }

 static int
-make_pending_calls(PyInterpreterState *interp)
+make_pending_calls(void)
 {
     static int busy = 0;

+    /* only service pending calls on main thread */
+    if (_PyRuntime.ceval.pending.main_thread &&
+        PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
+    {
+        return 0;
+    }
+
     /* don't perform recursive pending calls */
     if (busy) {
         return 0;
@@ -451,13 +414,13 @@ make_pending_calls(PyInterpreterState *interp)
     busy = 1;
     /* unsignal before starting to call callbacks, so that any callback
        added in-between re-signals */
-    UNSIGNAL_PENDING_CALLS(interp);
+    UNSIGNAL_PENDING_CALLS();
     int res = 0;

-    if (!interp->ceval.pending.lock) {
+    if (!_PyRuntime.ceval.pending.lock) {
         /* initial allocation of the lock */
-        interp->ceval.pending.lock = PyThread_allocate_lock();
-        if (interp->ceval.pending.lock == NULL) {
+        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
+        if (_PyRuntime.ceval.pending.lock == NULL) {
             res = -1;
             goto error;
         }
@@ -465,18 +428,24 @@

     /* perform a bounded number of calls, in case of recursion */
     for (int i=0; i<NPENDINGCALLS; i++) {
-        int (*func)(void *) = NULL;
+        int j;
+        int (*func)(void *);
         void *arg = NULL;

         /* pop one item off the queue while holding the lock */
-        PyThread_acquire_lock(interp->ceval.pending.lock, WAIT_LOCK);
-        _pop_pending_call(interp, &func, &arg);
-        PyThread_release_lock(interp->ceval.pending.lock);
-
+        PyThread_acquire_lock(_PyRuntime.ceval.pending.lock, WAIT_LOCK);
+        j = _PyRuntime.ceval.pending.first;
+        if (j == _PyRuntime.ceval.pending.last) {
+            func = NULL; /* Queue empty */
+        } else {
+            func = _PyRuntime.ceval.pending.calls[j].func;
+            arg = _PyRuntime.ceval.pending.calls[j].arg;
+            _PyRuntime.ceval.pending.first = (j + 1) % NPENDINGCALLS;
+        }
+        PyThread_release_lock(_PyRuntime.ceval.pending.lock);
         /* having released the lock, perform the callback */
-        if (func == NULL) {
+        if (func == NULL)
             break;
-        }
         res = func(arg);
         if (res) {
             goto error;
@@ -488,18 +457,10 @@

 error:
     busy = 0;
-    SIGNAL_PENDING_CALLS(interp); /* We're not done yet */
+    SIGNAL_PENDING_CALLS(); /* We're not done yet */
     return res;
 }

-int
-_Py_MakePendingCalls(PyInterpreterState *interp)
-{
-    assert(PyGILState_Check());
-
-    return make_pending_calls(interp);
-}
-
 /* Py_MakePendingCalls() is a simple wrapper for the sake
    of backward-compatibility. */
 int
@@ -514,8 +475,12 @@ Py_MakePendingCalls(void)
         return res;
     }

-    PyInterpreterState *interp = _PyRuntime.interpreters.main;
-    return make_pending_calls(interp);
+    res = make_pending_calls();
+    if (res != 0) {
+        return res;
+    }
+
+    return 0;
 }

 /* The interpreter's recursion limit */
@@ -637,7 +602,6 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
     PyObject **fastlocals, **freevars;
     PyObject *retval = NULL;            /* Return value */
     PyThreadState *tstate = _PyThreadState_GET();
-    _Py_atomic_int *eval_breaker = &tstate->interp->ceval.eval_breaker;
     PyCodeObject *co;

     /* when tracing we set things up so that
@@ -723,7 +687,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)

 #define DISPATCH() \
     { \
-        if (!_Py_atomic_load_relaxed(eval_breaker)) { \
+        if (!_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) { \
             FAST_DISPATCH(); \
         } \
         continue; \
@@ -1025,7 +989,7 @@ main_loop:
            async I/O handler); see Py_AddPendingCall() and
            Py_MakePendingCalls() above. */

-        if (_Py_atomic_load_relaxed(eval_breaker)) {
+        if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
             opcode = _Py_OPCODE(*next_instr);
             if (opcode == SETUP_FINALLY ||
                 opcode == SETUP_WITH ||
@@ -1058,9 +1022,9 @@ main_loop:
                 }
             }
             if (_Py_atomic_load_relaxed(
-                        &(tstate->interp->ceval.pending.calls_to_do)))
+                        &_PyRuntime.ceval.pending.calls_to_do))
             {
-                if (_Py_MakePendingCalls(tstate->interp) != 0) {
+                if (make_pending_calls() != 0) {
                     goto error;
                 }
             }
@@ -1092,7 +1056,7 @@ main_loop:
             if (tstate->async_exc != NULL) {
                 PyObject *exc = tstate->async_exc;
                 tstate->async_exc = NULL;
-                UNSIGNAL_ASYNC_EXC(tstate->interp);
+                UNSIGNAL_ASYNC_EXC();
                 PyErr_SetNone(exc);
                 Py_DECREF(exc);
                 goto error;
diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h
index d9ad3616fa..f2d5fdba01 100644
--- a/Python/ceval_gil.h
+++ b/Python/ceval_gil.h
@@ -176,7 +176,7 @@ static void drop_gil(PyThreadState *tstate)
                 &_PyRuntime.ceval.gil.last_holder)
             ) == tstate)
         {
-        RESET_GIL_DROP_REQUEST(tstate->interp);
+        RESET_GIL_DROP_REQUEST();
         /* NOTE: if COND_WAIT does not atomically start waiting when
            releasing the mutex, another thread can run through, take
            the GIL and drop it again, and reset the condition
@@ -213,7 +213,7 @@ static void take_gil(PyThreadState *tstate)
         if (timed_out &&
             _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil.locked) &&
             _PyRuntime.ceval.gil.switch_number == saved_switchnum) {
-            SET_GIL_DROP_REQUEST(tstate->interp);
+            SET_GIL_DROP_REQUEST();
         }
     }
 _ready:
@@ -239,10 +239,10 @@ _ready:
     MUTEX_UNLOCK(_PyRuntime.ceval.gil.switch_mutex);
 #endif
     if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST(tstate->interp);
+        RESET_GIL_DROP_REQUEST();
     }
     if (tstate->async_exc != NULL) {
-        _PyEval_SignalAsyncExc(tstate->interp);
+        _PyEval_SignalAsyncExc();
     }

     MUTEX_UNLOCK(_PyRuntime.ceval.gil.mutex);
diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c
index 088e7aa931..a5cfc07c48 100644
--- a/Python/pylifecycle.c
+++ b/Python/pylifecycle.c
@@ -1460,32 +1460,8 @@ Py_EndInterpreter(PyThreadState *tstate)
     if (tstate->frame != NULL)
         Py_FatalError("Py_EndInterpreter: thread still has a frame");

-    // Mark as finalizing.
-    if (interp->ceval.pending.lock != NULL) {
-        PyThread_acquire_lock(interp->ceval.pending.lock, 1);
-    }
-    interp->finalizing = 1;
-    if (interp->ceval.pending.lock != NULL) {
-        PyThread_release_lock(interp->ceval.pending.lock);
-    }
-
-    // Wrap up existing threads.
     wait_for_thread_shutdown();

-    // Make any pending calls.
-    if (_Py_atomic_load_relaxed(
-                &(interp->ceval.pending.calls_to_do)))
-    {
-        // XXX Ensure that the interpreter is running in the current thread?
-        if (_Py_MakePendingCalls(interp) < 0) {
-            PyObject *exc, *val, *tb;
-            PyErr_Fetch(&exc, &val, &tb);
-            PyErr_BadInternalCall();
-            _PyErr_ChainExceptions(exc, val, tb);
-            PyErr_Print();
-        }
-    }
-
     call_py_exitfuncs(interp);

     if (tstate != interp->tstate_head || tstate->next != NULL)
diff --git a/Python/pystate.c b/Python/pystate.c
index 99a01efa6c..49497b7c37 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -133,19 +133,28 @@ PyInterpreterState_New(void)
         return NULL;
     }

-    memset(interp, 0, sizeof(*interp));
     interp->id_refcount = -1;
+    interp->id_mutex = NULL;
+    interp->modules = NULL;
+    interp->modules_by_index = NULL;
+    interp->sysdict = NULL;
+    interp->builtins = NULL;
+    interp->builtins_copy = NULL;
+    interp->tstate_head = NULL;
     interp->check_interval = 100;
-
-    interp->ceval.pending.lock = PyThread_allocate_lock();
-    if (interp->ceval.pending.lock == NULL) {
-        PyErr_SetString(PyExc_RuntimeError,
-                        "failed to create interpreter ceval pending mutex");
-        return NULL;
-    }
+    interp->num_threads = 0;
+    interp->pythread_stacksize = 0;
+    interp->codec_search_path = NULL;
+    interp->codec_search_cache = NULL;
+    interp->codec_error_registry = NULL;
+    interp->codecs_initialized = 0;
+    interp->fscodec_initialized = 0;
     interp->core_config = _PyCoreConfig_INIT;
     interp->config = _PyMainInterpreterConfig_INIT;
+    interp->importlib = NULL;
+    interp->import_func = NULL;
     interp->eval_frame = _PyEval_EvalFrameDefault;
+    interp->co_extra_user_count = 0;
 #ifdef HAVE_DLOPEN
 #if HAVE_DECL_RTLD_NOW
     interp->dlopenflags = RTLD_NOW;
@@ -153,10 +162,13 @@ PyInterpreterState_New(void)
     interp->dlopenflags = RTLD_LAZY;
 #endif
 #endif
-
-    if (_PyRuntime.main_thread == 0) {
-        _PyRuntime.main_thread = PyThread_get_thread_ident();
-    }
+#ifdef HAVE_FORK
+    interp->before_forkers = NULL;
+    interp->after_forkers_parent = NULL;
+    interp->after_forkers_child = NULL;
+#endif
+    interp->pyexitfunc = NULL;
+    interp->pyexitmodule = NULL;

     HEAD_LOCK();
     if (_PyRuntime.interpreters.next_id < 0) {
@@ -211,9 +223,6 @@ PyInterpreterState_Clear(PyInterpreterState *interp)
     Py_CLEAR(interp->after_forkers_parent);
     Py_CLEAR(interp->after_forkers_child);
 #endif
-    // XXX Once we have one allocator per interpreter (i.e.
-    // per-interpreter GC) we must ensure that all of the interpreter's
-    // objects have been cleaned up at the point.
 }

@@ -254,9 +263,6 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
     if (interp->id_mutex != NULL) {
         PyThread_free_lock(interp->id_mutex);
     }
-    if (interp->ceval.pending.lock != NULL) {
-        PyThread_free_lock(interp->ceval.pending.lock);
-    }
     PyMem_RawFree(interp);
 }

@@ -328,37 +334,26 @@ PyInterpreterState_GetID(PyInterpreterState *interp)
 }

-static PyInterpreterState *
-interp_look_up_id(PY_INT64_T requested_id)
+PyInterpreterState *
+_PyInterpreterState_LookUpID(PY_INT64_T requested_id)
 {
+    if (requested_id < 0)
+        goto error;
+
     PyInterpreterState *interp = PyInterpreterState_Head();
     while (interp != NULL) {
         PY_INT64_T id = PyInterpreterState_GetID(interp);
-        if (id < 0) {
+        if (id < 0)
             return NULL;
-        }
-        if (requested_id == id) {
+        if (requested_id == id)
             return interp;
-        }
         interp = PyInterpreterState_Next(interp);
     }
-    return NULL;
-}

-PyInterpreterState *
-_PyInterpreterState_LookUpID(PY_INT64_T requested_id)
-{
-    PyInterpreterState *interp = NULL;
-    if (requested_id >= 0) {
-        HEAD_UNLOCK();
-        interp = interp_look_up_id(requested_id);
-        HEAD_UNLOCK();
-    }
-    if (interp == NULL && !PyErr_Occurred()) {
-        PyErr_Format(PyExc_RuntimeError,
-                     "unrecognized interpreter ID %lld", requested_id);
-    }
-    return interp;
+error:
+    PyErr_Format(PyExc_RuntimeError,
+                 "unrecognized interpreter ID %lld", requested_id);
+    return NULL;
 }

@@ -403,7 +398,7 @@ _PyInterpreterState_IDDecref(PyInterpreterState *interp)
     int64_t refcount = interp->id_refcount;
     PyThread_release_lock(interp->id_mutex);

-    if (refcount == 0 && interp->requires_idref) {
+    if (refcount == 0) {
         // XXX Using the "head" thread isn't strictly correct.
         PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
         // XXX Possible GILState issues?
@@ -413,18 +408,6 @@ _PyInterpreterState_IDDecref(PyInterpreterState *interp)
     }
 }

-int
-_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
-{
-    return interp->requires_idref;
-}
-
-void
-_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
-{
-    interp->requires_idref = required ? 1 : 0;
-}
-
 _PyCoreConfig *
 _PyInterpreterState_GetCoreConfig(PyInterpreterState *interp)
 {
@@ -437,16 +420,6 @@ _PyInterpreterState_GetMainConfig(PyInterpreterState *interp)
     return &interp->config;
 }

-PyObject *
-_PyInterpreterState_GetMainModule(PyInterpreterState *interp)
-{
-    if (interp->modules == NULL) {
-        PyErr_SetString(PyExc_RuntimeError, "interpreter not initialized");
-        return NULL;
-    }
-    return PyMapping_GetItemString(interp->modules, "__main__");
-}
-
 /* Default implementation for _PyThreadState_GetFrame */
 static struct _frame *
 threadstate_getframe(PyThreadState *self)
@@ -899,7 +872,7 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
             p->async_exc = exc;
             HEAD_UNLOCK();
             Py_XDECREF(old_exc);
-            _PyEval_SignalAsyncExc(interp);
+            _PyEval_SignalAsyncExc();
             return 1;
         }
     }
@@ -1313,7 +1286,7 @@ _PyObject_GetCrossInterpreterData(PyObject *obj, _PyCrossInterpreterData *data)
     return 0;
 }

-static int
+static void
 _release_xidata(void *arg)
 {
     _PyCrossInterpreterData *data = (_PyCrossInterpreterData *)arg;
@@ -1321,8 +1294,30 @@ _release_xidata(void *arg)
         data->free(data->data);
     }
     Py_XDECREF(data->obj);
-    PyMem_Free(data);
-    return 0;
+}
+
+static void
+_call_in_interpreter(PyInterpreterState *interp,
+                     void (*func)(void *), void *arg)
+{
+    /* We would use Py_AddPendingCall() if it weren't specific to the
+     * main interpreter (see bpo-33608).  In the meantime we take a
+     * naive approach.
+     */
+    PyThreadState *save_tstate = NULL;
+    if (interp != _PyInterpreterState_Get()) {
+        // XXX Using the "head" thread isn't strictly correct.
+        PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
+        // XXX Possible GILState issues?
+        save_tstate = PyThreadState_Swap(tstate);
+    }
+
+    func(arg);
+
+    // Switch back.
+    if (save_tstate != NULL) {
+        PyThreadState_Swap(save_tstate);
+    }
 }

 void
@@ -1333,7 +1328,7 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
         return;
     }

-    // Get the original interpreter.
+    // Switch to the original interpreter.
     PyInterpreterState *interp = _PyInterpreterState_LookUpID(data->interp);
     if (interp == NULL) {
         // The intepreter was already destroyed.
@@ -1342,24 +1337,9 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
         }
         return;
     }
-    // XXX There's an ever-so-slight race here...
-    if (interp->finalizing) {
-        // XXX Someone leaked some memory...
-        return;
-    }

     // "Release" the data and/or the object.
-    _PyCrossInterpreterData *copied = PyMem_Malloc(sizeof(_PyCrossInterpreterData));
-    if (copied == NULL) {
-        PyErr_SetString(PyExc_MemoryError,
-                        "Not enough memory to preserve cross-interpreter data");
-        PyErr_Print();
-        return;
-    }
-    memcpy(copied, data, sizeof(_PyCrossInterpreterData));
-    if (_Py_AddPendingCall(interp, 0, _release_xidata, copied) != 0) {
-        // XXX Queue full or couldn't get lock.  Try again somehow?
-    }
+    _call_in_interpreter(interp, _release_xidata, data);
 }

 PyObject *
@@ -1392,7 +1372,7 @@ _register_xidata(PyTypeObject *cls, crossinterpdatafunc getdata)
 static void _register_builtins_for_crossinterpreter_data(void);

 int
-_PyCrossInterpreterData_RegisterClass(PyTypeObject *cls,
+_PyCrossInterpreterData_Register_Class(PyTypeObject *cls,
                                        crossinterpdatafunc getdata)
 {
     if (!PyType_Check(cls)) {
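Note: the ceval.c hunks above restore Py_AddPendingCall() as the single public entry point for scheduling a callback onto the main thread via the runtime-global pending-call queue. The following is a minimal usage sketch, not part of the patch; the callback name and message string are illustrative only.

    #include <Python.h>

    /* Runs later in the main thread, with the GIL held, once the eval loop
       notices eval_breaker and drains the pending-call queue. */
    static int
    notify_main_thread(void *arg)
    {
        PySys_WriteStdout("pending call ran: %s\n", (const char *)arg);
        return 0;  /* a non-zero result is treated as an error by the eval loop */
    }

    /* May be called from any thread; Py_AddPendingCall() does not require
       the GIL. */
    static void
    schedule_notification(void)
    {
        if (Py_AddPendingCall(notify_main_thread, (void *)"worker finished") < 0) {
            /* The queue (NPENDINGCALLS slots) was full or the lock was busy;
               callers are expected to retry later. */
        }
    }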