Merge pull request #94169 from RandomShaper/wtp_rl_prize

Batch of fixes for `WorkerThreadPool` and `ResourceLoader`
Rémi Verschelde 2024-08-26 10:51:25 +02:00
commit 0ba0be27c7
12 changed files with 289 additions and 217 deletions

SConstruct

@@ -861,7 +861,7 @@ else: # GCC, Clang
         if cc_version_major >= 11: # Broke on MethodBind templates before GCC 11.
             env.Append(CCFLAGS=["-Wlogical-op"])
     elif methods.using_clang(env) or methods.using_emcc(env):
-        env.Append(CCFLAGS=["-Wimplicit-fallthrough"])
+        env.Append(CCFLAGS=["-Wimplicit-fallthrough", "-Wno-undefined-var-template"])
 elif env["warnings"] == "all":
     env.Append(CCFLAGS=["-Wall"] + common_warnings)
 elif env["warnings"] == "moderate":
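A plausible reading (not stated in the diff itself): the new -Wno-undefined-var-template silences Clang for the reworked SafeBinaryMutex further down, whose static thread_local member is declared in the header but only defined, per tag, in the .cpp files that use it. A minimal sketch of the pattern that triggers the warning, with hypothetical names:

// Header (sketch): static thread_local member of a class template, used from an
// inline function, but with no definition visible in this translation unit.
template <int Tag>
struct TaggedCounter {
    static thread_local int count; // Defined elsewhere, per tag.
    static void bump() { count++; } // Clang emits -Wundefined-var-template here.
};

// One .cpp (sketch): the definition is provided only for the tags actually used.
template <>
thread_local int TaggedCounter<1>::count = 0;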

core/io/resource_loader.cpp

@@ -207,34 +207,53 @@ void ResourceFormatLoader::_bind_methods() {

 ///////////////////////////////////

+// These are used before and after a wait for a WorkerThreadPool task
+// because that can lead to another load started in the same thread,
+// something we must treat as a different stack for the purposes
+// of tracking nesting.
+#define PREPARE_FOR_WTP_WAIT \
+    int load_nesting_backup = ResourceLoader::load_nesting; \
+    Vector<String> load_paths_stack_backup = ResourceLoader::load_paths_stack; \
+    ResourceLoader::load_nesting = 0; \
+    ResourceLoader::load_paths_stack.clear();
+
+#define RESTORE_AFTER_WTP_WAIT \
+    DEV_ASSERT(ResourceLoader::load_nesting == 0); \
+    DEV_ASSERT(ResourceLoader::load_paths_stack.is_empty()); \
+    ResourceLoader::load_nesting = load_nesting_backup; \
+    ResourceLoader::load_paths_stack = load_paths_stack_backup; \
+    load_paths_stack_backup.clear();
+
 // This should be robust enough to be called redundantly without issues.
 void ResourceLoader::LoadToken::clear() {
     thread_load_mutex.lock();

     WorkerThreadPool::TaskID task_to_await = 0;

+    // User-facing tokens shouldn't be deleted until completely claimed.
+    DEV_ASSERT(user_rc == 0 && user_path.is_empty());
+
     if (!local_path.is_empty()) { // Empty is used for the special case where the load task is not registered.
         DEV_ASSERT(thread_load_tasks.has(local_path));
         ThreadLoadTask &load_task = thread_load_tasks[local_path];
-        if (!load_task.awaited) {
+        if (load_task.task_id && !load_task.awaited) {
             task_to_await = load_task.task_id;
-            load_task.awaited = true;
         }
+        // Removing a task which is still in progress would be catastrophic.
+        // Tokens must be alive until the task thread function is done.
+        DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
         thread_load_tasks.erase(local_path);
         local_path.clear();
     }

-    if (!user_path.is_empty()) {
-        DEV_ASSERT(user_load_tokens.has(user_path));
-        user_load_tokens.erase(user_path);
-        user_path.clear();
-    }
-
     thread_load_mutex.unlock();

     // If task is unused, await it here, locally, now the token data is consistent.
     if (task_to_await) {
+        PREPARE_FOR_WTP_WAIT
         WorkerThreadPool::get_singleton()->wait_for_task_completion(task_to_await);
+        RESTORE_AFTER_WTP_WAIT
     }
 }
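To make the intent of the two macros concrete, here is a minimal sketch of how they bracket a pool wait, as it would appear inside resource_loader.cpp where the macros are defined (the helper name example_await is hypothetical). While the thread is blocked in wait_for_task_completion(), the pool may run an unrelated load task on this very thread, so that task must see a clean load_nesting / load_paths_stack, and the waiting load's values are put back afterwards.

// Hypothetical helper, for illustration only.
static void example_await(WorkerThreadPool::TaskID p_task_to_await) {
    PREPARE_FOR_WTP_WAIT // Back up and reset the thread-local load-tracking state.
    WorkerThreadPool::get_singleton()->wait_for_task_completion(p_task_to_await); // May run another load inline.
    RESTORE_AFTER_WTP_WAIT // Restore the state of the load that was waiting.
}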
@@ -295,11 +314,11 @@ Ref<Resource> ResourceLoader::_load(const String &p_path, const String &p_origin
     ERR_FAIL_V_MSG(Ref<Resource>(), vformat("No loader found for resource: %s (expected type: %s)", p_path, p_type_hint));
 }

-void ResourceLoader::_thread_load_function(void *p_userdata) {
+// This implementation must allow re-entrancy for a task that started awaiting in a deeper stack frame.
+void ResourceLoader::_run_load_task(void *p_userdata) {
     ThreadLoadTask &load_task = *(ThreadLoadTask *)p_userdata;

     thread_load_mutex.lock();
-    caller_task_id = load_task.task_id;
     if (cleaning_tasks) {
         load_task.status = THREAD_LOAD_FAILED;
         thread_load_mutex.unlock();
@@ -322,8 +341,10 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
     }

     // --

+    bool xl_remapped = false;
+    const String &remapped_path = _path_remap(load_task.local_path, &xl_remapped);
     Error load_err = OK;
-    Ref<Resource> res = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_err, load_task.use_sub_threads, &load_task.progress);
+    Ref<Resource> res = _load(remapped_path, remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_err, load_task.use_sub_threads, &load_task.progress);
     if (MessageQueue::get_singleton() != MessageQueue::get_main_singleton()) {
         MessageQueue::get_singleton()->flush();
     }
@@ -356,27 +377,40 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
         unlock_pending = false;

         if (!ignoring) {
-            if (replacing) {
-                Ref<Resource> old_res = ResourceCache::get_ref(load_task.local_path);
-                if (old_res.is_valid() && old_res != load_task.resource) {
-                    // If resource is already loaded, only replace its data, to avoid existing invalidating instances.
-                    old_res->copy_from(load_task.resource);
-                    load_task.resource = old_res;
-                }
-            }
-            load_task.resource->set_path(load_task.local_path, replacing);
+            ResourceCache::lock.lock(); // Check and operations must happen atomically.
+            bool pending_unlock = true;
+            Ref<Resource> old_res = ResourceCache::get_ref(load_task.local_path);
+            if (old_res.is_valid()) {
+                if (old_res != load_task.resource) {
+                    // Resource can already exists at this point for two reasons:
+                    // a) The load uses replace mode.
+                    // b) There were more than one load in flight for the same path because of deadlock prevention.
+                    // Either case, we want to keep the resource that was already there.
+                    ResourceCache::lock.unlock();
+                    pending_unlock = false;
+                    if (replacing) {
+                        old_res->copy_from(load_task.resource);
+                    }
+                    load_task.resource = old_res;
+                }
+            } else {
+                load_task.resource->set_path(load_task.local_path);
+            }
+            if (pending_unlock) {
+                ResourceCache::lock.unlock();
+            }
         } else {
             load_task.resource->set_path_cache(load_task.local_path);
         }

-        if (load_task.xl_remapped) {
+        if (xl_remapped) {
             load_task.resource->set_as_translation_remapped(true);
         }

 #ifdef TOOLS_ENABLED
         load_task.resource->set_edited(false);
         if (timestamp_on_load) {
-            uint64_t mt = FileAccess::get_modified_time(load_task.remapped_path);
+            uint64_t mt = FileAccess::get_modified_time(remapped_path);
             //printf("mt %s: %lli\n",remapped_path.utf8().get_data(),mt);
             load_task.resource->set_last_modified_time(mt);
         }
@@ -426,36 +460,44 @@ static String _validate_local_path(const String &p_path) {
 }

 Error ResourceLoader::load_threaded_request(const String &p_path, const String &p_type_hint, bool p_use_sub_threads, ResourceFormatLoader::CacheMode p_cache_mode) {
-    thread_load_mutex.lock();
-    if (user_load_tokens.has(p_path)) {
-        print_verbose("load_threaded_request(): Another threaded load for resource path '" + p_path + "' has been initiated. Not an error.");
-        user_load_tokens[p_path]->reference(); // Additional request.
-        thread_load_mutex.unlock();
-        return OK;
-    }
-    user_load_tokens[p_path] = nullptr;
-    thread_load_mutex.unlock();
-
-    Ref<ResourceLoader::LoadToken> token = _load_start(p_path, p_type_hint, p_use_sub_threads ? LOAD_THREAD_DISTRIBUTE : LOAD_THREAD_SPAWN_SINGLE, p_cache_mode);
-    if (token.is_valid()) {
-        thread_load_mutex.lock();
-        token->user_path = p_path;
-        token->reference(); // First request.
-        user_load_tokens[p_path] = token.ptr();
-        print_lt("REQUEST: user load tokens: " + itos(user_load_tokens.size()));
-        thread_load_mutex.unlock();
-        return OK;
+    Ref<ResourceLoader::LoadToken> token = _load_start(p_path, p_type_hint, p_use_sub_threads ? LOAD_THREAD_DISTRIBUTE : LOAD_THREAD_SPAWN_SINGLE, p_cache_mode, true);
+    return token.is_valid() ? OK : FAILED;
+}
+
+ResourceLoader::LoadToken *ResourceLoader::_load_threaded_request_reuse_user_token(const String &p_path) {
+    HashMap<String, LoadToken *>::Iterator E = user_load_tokens.find(p_path);
+    if (E) {
+        print_verbose("load_threaded_request(): Another threaded load for resource path '" + p_path + "' has been initiated. Not an error.");
+        LoadToken *token = E->value;
+        token->user_rc++;
+        return token;
     } else {
-        return FAILED;
+        return nullptr;
     }
 }

+void ResourceLoader::_load_threaded_request_setup_user_token(LoadToken *p_token, const String &p_path) {
+    p_token->user_path = p_path;
+    p_token->reference(); // Extra RC until all user requests have been gotten.
+    p_token->user_rc = 1;
+    user_load_tokens[p_path] = p_token;
+    print_lt("REQUEST: user load tokens: " + itos(user_load_tokens.size()));
+}
+
 Ref<Resource> ResourceLoader::load(const String &p_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error) {
     if (r_error) {
         *r_error = OK;
     }

-    Ref<LoadToken> load_token = _load_start(p_path, p_type_hint, LOAD_THREAD_FROM_CURRENT, p_cache_mode);
+    LoadThreadMode thread_mode = LOAD_THREAD_FROM_CURRENT;
+    if (WorkerThreadPool::get_singleton()->get_caller_task_id() != WorkerThreadPool::INVALID_TASK_ID) {
+        // If user is initiating a single-threaded load from a WorkerThreadPool task,
+        // we instead spawn a new task so there's a precondition that a load in a pool task
+        // is always initiated by the engine. That makes certain aspects simpler, such as
+        // cyclic load detection and awaiting.
+        thread_mode = LOAD_THREAD_SPAWN_SINGLE;
+    }
+    Ref<LoadToken> load_token = _load_start(p_path, p_type_hint, thread_mode, p_cache_mode);
     if (!load_token.is_valid()) {
         if (r_error) {
             *r_error = FAILED;
@@ -467,7 +509,7 @@ Ref<Resource> ResourceLoader::load(const String &p_path, const String &p_type_hi
     return res;
 }

-Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode) {
+Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode, bool p_for_user) {
     String local_path = _validate_local_path(p_path);

     bool ignoring_cache = p_cache_mode == ResourceFormatLoader::CACHE_MODE_IGNORE || p_cache_mode == ResourceFormatLoader::CACHE_MODE_IGNORE_DEEP;
@@ -480,6 +522,13 @@ Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path,
     {
         MutexLock thread_load_lock(thread_load_mutex);

+        if (p_for_user) {
+            LoadToken *existing_token = _load_threaded_request_reuse_user_token(p_path);
+            if (existing_token) {
+                return Ref<LoadToken>(existing_token);
+            }
+        }
+
         if (!ignoring_cache && thread_load_tasks.has(local_path)) {
             load_token = Ref<LoadToken>(thread_load_tasks[local_path].load_token);
             if (load_token.is_valid()) {
@@ -493,12 +542,14 @@ Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path,
         load_token.instantiate();
         load_token->local_path = local_path;
+        if (p_for_user) {
+            _load_threaded_request_setup_user_token(load_token.ptr(), p_path);
+        }

         //create load task
         {
             ThreadLoadTask load_task;

-            load_task.remapped_path = _path_remap(local_path, &load_task.xl_remapped);
             load_task.load_token = load_token.ptr();
             load_task.local_path = local_path;
             load_task.type_hint = p_type_hint;
@@ -511,6 +562,7 @@ Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path,
                 load_task.resource = existing;
                 load_task.status = THREAD_LOAD_LOADED;
                 load_task.progress = 1.0;
+                DEV_ASSERT(!thread_load_tasks.has(local_path));
                 thread_load_tasks[local_path] = load_task;
                 return load_token;
             }
@@ -532,14 +584,20 @@ Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path,
         run_on_current_thread = must_not_register || p_thread_mode == LOAD_THREAD_FROM_CURRENT;

         if (run_on_current_thread) {
-            load_task_ptr->thread_id = Thread::get_caller_id();
+            // The current thread may happen to be a thread from the pool.
+            WorkerThreadPool::TaskID tid = WorkerThreadPool::get_singleton()->get_caller_task_id();
+            if (tid != WorkerThreadPool::INVALID_TASK_ID) {
+                load_task_ptr->task_id = tid;
+            } else {
+                load_task_ptr->thread_id = Thread::get_caller_id();
+            }
         } else {
-            load_task_ptr->task_id = WorkerThreadPool::get_singleton()->add_native_task(&ResourceLoader::_thread_load_function, load_task_ptr);
+            load_task_ptr->task_id = WorkerThreadPool::get_singleton()->add_native_task(&ResourceLoader::_run_load_task, load_task_ptr);
         }
-    }
+    } // MutexLock(thread_load_mutex).

     if (run_on_current_thread) {
-        _thread_load_function(load_task_ptr);
+        _run_load_task(load_task_ptr);
         if (must_not_register) {
             load_token->res_if_unregistered = load_task_ptr->resource;
         }
@@ -626,13 +684,7 @@ Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_e
         }

         LoadToken *load_token = user_load_tokens[p_path];
-        if (!load_token) {
-            // This happens if requested from one thread and rapidly querying from another.
-            if (r_error) {
-                *r_error = ERR_BUSY;
-            }
-            return Ref<Resource>();
-        }
+        DEV_ASSERT(load_token->user_rc >= 1);

         // Support userland requesting on the main thread before the load is reported to be complete.
         if (Thread::is_main_thread() && !load_token->local_path.is_empty()) {
@@ -649,8 +701,15 @@ Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_e
         }

         res = _load_complete_inner(*load_token, r_error, thread_load_lock);
-        if (load_token->unreference()) {
-            memdelete(load_token);
+
+        load_token->user_rc--;
+        if (load_token->user_rc == 0) {
+            load_token->user_path.clear();
+            user_load_tokens.erase(p_path);
+            if (load_token->unreference()) {
+                memdelete(load_token);
+                load_token = nullptr;
+            }
         }
     }
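The net effect on the user-facing request/get pair can be summarized with a small engine-side sketch (the resource path is hypothetical): repeated load_threaded_request() calls for the same path now share one token and bump its user_rc, each load_threaded_get() decrements it, and the token is only unregistered and freed when the count reaches zero.

#include "core/io/resource_loader.h"

// Hypothetical illustration of the token lifecycle after this change.
Error err = ResourceLoader::load_threaded_request("res://example.tres"); // New token, user_rc == 1.
err = ResourceLoader::load_threaded_request("res://example.tres");       // Same token reused, user_rc == 2.

Ref<Resource> res_a = ResourceLoader::load_threaded_get("res://example.tres"); // user_rc drops to 1.
Ref<Resource> res_b = ResourceLoader::load_threaded_get("res://example.tres"); // user_rc hits 0; token unregistered and freed.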
@@ -682,7 +741,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
     if (load_task.status == THREAD_LOAD_IN_PROGRESS) {
         DEV_ASSERT((load_task.task_id == 0) != (load_task.thread_id == 0));

-        if ((load_task.task_id != 0 && load_task.task_id == caller_task_id) ||
+        if ((load_task.task_id != 0 && load_task.task_id == WorkerThreadPool::get_singleton()->get_caller_task_id()) ||
                 (load_task.thread_id != 0 && load_task.thread_id == Thread::get_caller_id())) {
             // Load is in progress, but it's precisely this thread the one in charge.
             // That means this is a cyclic load.
@@ -693,55 +752,45 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
             }
         }

         bool loader_is_wtp = load_task.task_id != 0;
-        Error wtp_task_err = FAILED;
         if (loader_is_wtp) {
             // Loading thread is in the worker pool.
-            load_task.awaited = true;
             thread_load_mutex.unlock();
-            wtp_task_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
-        }
-
-        if (load_task.status == THREAD_LOAD_IN_PROGRESS) { // If early errored, awaiting would deadlock.
-            if (loader_is_wtp) {
-                if (wtp_task_err == ERR_BUSY) {
-                    // The WorkerThreadPool has reported that the current task wants to await on an older one.
-                    // That't not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
-                    // resource loading that means that the task to wait for can be restarted here to break the
-                    // cycle, with as much recursion into this process as needed.
-                    // When the stack is eventually unrolled, the original load will have been notified to go on.
-                    // CACHE_MODE_IGNORE is needed because, otherwise, the new request would just see there's
-                    // an ongoing load for that resource and wait for it again. This value forces a new load.
-                    Ref<ResourceLoader::LoadToken> token = _load_start(load_task.local_path, load_task.type_hint, LOAD_THREAD_DISTRIBUTE, ResourceFormatLoader::CACHE_MODE_IGNORE);
-                    Ref<Resource> resource = _load_complete(*token.ptr(), &wtp_task_err);
-                    if (r_error) {
-                        *r_error = wtp_task_err;
-                    }
-                    thread_load_mutex.lock();
-                    return resource;
-                } else {
-                    DEV_ASSERT(wtp_task_err == OK);
-                    thread_load_mutex.lock();
-                }
-            } else if (load_task.need_wait) {
-                // Loading thread is main or user thread.
-                if (!load_task.cond_var) {
-                    load_task.cond_var = memnew(ConditionVariable);
-                }
-                load_task.awaiters_count++;
-                do {
-                    load_task.cond_var->wait(p_thread_load_lock);
-                    DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
-                } while (load_task.need_wait);
-                load_task.awaiters_count--;
-                if (load_task.awaiters_count == 0) {
-                    memdelete(load_task.cond_var);
-                    load_task.cond_var = nullptr;
-                }
-            }
-        } else {
-            if (loader_is_wtp) {
-                thread_load_mutex.lock();
-            }
+
+            PREPARE_FOR_WTP_WAIT
+            Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
+            RESTORE_AFTER_WTP_WAIT
+
+            DEV_ASSERT(!wait_err || wait_err == ERR_BUSY);
+            if (wait_err == ERR_BUSY) {
+                // The WorkerThreadPool has reported that the current task wants to await on an older one.
+                // That't not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
+                // resource loading that means that the task to wait for can be restarted here to break the
+                // cycle, with as much recursion into this process as needed.
+                // When the stack is eventually unrolled, the original load will have been notified to go on.
+                _run_load_task(&load_task);
+            }
+
+            thread_load_mutex.lock();
+            load_task.awaited = true;
+
+            DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
+        } else if (load_task.need_wait) {
+            // Loading thread is main or user thread.
+            if (!load_task.cond_var) {
+                load_task.cond_var = memnew(ConditionVariable);
+            }
+            load_task.awaiters_count++;
+            do {
+                load_task.cond_var->wait(p_thread_load_lock);
+                DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
+            } while (load_task.need_wait);
+            load_task.awaiters_count--;
+            if (load_task.awaiters_count == 0) {
+                memdelete(load_task.cond_var);
+                load_task.cond_var = nullptr;
+            }
+
+            DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
         }
     }
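Condensing the pool-task branch above into a few lines (a simplified sketch, not the literal function body): when the pool refuses the wait because it would deadlock, the awaiting thread runs the pending load itself, which is why _run_load_task() is written to be re-entrant.

// Simplified sketch of the await path for a load running on the pool.
Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
if (wait_err == ERR_BUSY) {
    // Waiting is not allowed (it would deadlock); do the load right here instead.
    _run_load_task(&load_task);
}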
@@ -1181,10 +1230,13 @@ void ResourceLoader::clear_thread_load_tasks() {
     }

     while (user_load_tokens.begin()) {
-        // User load tokens remove themselves from the map on destruction.
-        memdelete(user_load_tokens.begin()->value);
+        LoadToken *user_token = user_load_tokens.begin()->value;
+        user_load_tokens.remove(user_load_tokens.begin());
+        DEV_ASSERT(user_token->user_rc > 0 && !user_token->user_path.is_empty());
+        user_token->user_path.clear();
+        user_token->user_rc = 0;
+        user_token->unreference();
     }
-    user_load_tokens.clear();

     thread_load_tasks.clear();
@@ -1302,12 +1354,15 @@ bool ResourceLoader::abort_on_missing_resource = true;
 bool ResourceLoader::timestamp_on_load = false;

 thread_local int ResourceLoader::load_nesting = 0;
-thread_local WorkerThreadPool::TaskID ResourceLoader::caller_task_id = 0;
 thread_local Vector<String> ResourceLoader::load_paths_stack;
 thread_local HashMap<int, HashMap<String, Ref<Resource>>> ResourceLoader::res_ref_overrides;

+SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG> &_get_res_loader_mutex() {
+    return ResourceLoader::thread_load_mutex;
+}
+
 template <>
-thread_local uint32_t SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG>::count = 0;
+thread_local SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG>::TLSData SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG>::tls_data(_get_res_loader_mutex());
+
 SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG> ResourceLoader::thread_load_mutex;
 HashMap<String, ResourceLoader::ThreadLoadTask> ResourceLoader::thread_load_tasks;
 bool ResourceLoader::cleaning_tasks = false;

core/io/resource_loader.h

@@ -100,6 +100,8 @@ typedef Error (*ResourceLoaderImport)(const String &p_path);
 typedef void (*ResourceLoadedCallback)(Ref<Resource> p_resource, const String &p_path);

 class ResourceLoader {
+    friend class LoadToken;
+
     enum {
         MAX_LOADERS = 64
     };
@@ -121,6 +123,7 @@
     struct LoadToken : public RefCounted {
         String local_path;
         String user_path;
+        uint32_t user_rc = 0; // Having user RC implies regular RC incremented in one, until the user RC reaches zero.
         Ref<Resource> res_if_unregistered;

         void clear();
@@ -130,10 +133,13 @@
     static const int BINARY_MUTEX_TAG = 1;

-    static Ref<LoadToken> _load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode);
+    static Ref<LoadToken> _load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode, bool p_for_user = false);
     static Ref<Resource> _load_complete(LoadToken &p_load_token, Error *r_error);

 private:
+    static LoadToken *_load_threaded_request_reuse_user_token(const String &p_path);
+    static void _load_threaded_request_setup_user_token(LoadToken *p_token, const String &p_path);
+
     static Ref<Resource> _load_complete_inner(LoadToken &p_load_token, Error *r_error, MutexLock<SafeBinaryMutex<BINARY_MUTEX_TAG>> &p_thread_load_lock);

     static Ref<ResourceFormatLoader> loader[MAX_LOADERS];
@@ -171,7 +177,6 @@
         bool need_wait = true;

         LoadToken *load_token = nullptr;
         String local_path;
-        String remapped_path;
         String type_hint;
         float progress = 0.0f;
         float max_reported_progress = 0.0f;
@@ -180,18 +185,19 @@
         ResourceFormatLoader::CacheMode cache_mode = ResourceFormatLoader::CACHE_MODE_REUSE;
         Error error = OK;
         Ref<Resource> resource;
-        bool xl_remapped = false;
         bool use_sub_threads = false;
         HashSet<String> sub_tasks;
     };

-    static void _thread_load_function(void *p_userdata);
+    static void _run_load_task(void *p_userdata);

     static thread_local int load_nesting;
-    static thread_local WorkerThreadPool::TaskID caller_task_id;
     static thread_local HashMap<int, HashMap<String, Ref<Resource>>> res_ref_overrides; // Outermost key is nesting level.
     static thread_local Vector<String> load_paths_stack;
     static SafeBinaryMutex<BINARY_MUTEX_TAG> thread_load_mutex;
+    friend SafeBinaryMutex<BINARY_MUTEX_TAG> &_get_res_loader_mutex();
+
     static HashMap<String, ThreadLoadTask> thread_load_tasks;
     static bool cleaning_tasks;

core/object/worker_thread_pool.cpp

@@ -32,6 +32,7 @@

 #include "core/object/script_language.h"
 #include "core/os/os.h"
+#include "core/os/safe_binary_mutex.h"
 #include "core/os/thread_safe.h"

 WorkerThreadPool::Task *const WorkerThreadPool::ThreadData::YIELDING = (Task *)1;
@@ -46,7 +47,7 @@ void WorkerThreadPool::Task::free_template_userdata() {
 WorkerThreadPool *WorkerThreadPool::singleton = nullptr;

 #ifdef THREADS_ENABLED
-thread_local uintptr_t WorkerThreadPool::unlockable_mutexes[MAX_UNLOCKABLE_MUTEXES] = {};
+thread_local WorkerThreadPool::UnlockableLocks WorkerThreadPool::unlockable_locks[MAX_UNLOCKABLE_LOCKS];
 #endif

 void WorkerThreadPool::_process_task(Task *p_task) {
@@ -428,13 +429,9 @@ Error WorkerThreadPool::wait_for_task_completion(TaskID p_task_id) {

 void WorkerThreadPool::_lock_unlockable_mutexes() {
 #ifdef THREADS_ENABLED
-    for (uint32_t i = 0; i < MAX_UNLOCKABLE_MUTEXES; i++) {
-        if (unlockable_mutexes[i]) {
-            if ((((uintptr_t)unlockable_mutexes[i]) & 1) == 0) {
-                ((Mutex *)unlockable_mutexes[i])->lock();
-            } else {
-                ((BinaryMutex *)(unlockable_mutexes[i] & ~1))->lock();
-            }
+    for (uint32_t i = 0; i < MAX_UNLOCKABLE_LOCKS; i++) {
+        if (unlockable_locks[i].ulock) {
+            unlockable_locks[i].ulock->lock();
         }
     }
 #endif
@@ -442,13 +439,9 @@ void WorkerThreadPool::_lock_unlockable_mutexes() {

 void WorkerThreadPool::_unlock_unlockable_mutexes() {
 #ifdef THREADS_ENABLED
-    for (uint32_t i = 0; i < MAX_UNLOCKABLE_MUTEXES; i++) {
-        if (unlockable_mutexes[i]) {
-            if ((((uintptr_t)unlockable_mutexes[i]) & 1) == 0) {
-                ((Mutex *)unlockable_mutexes[i])->unlock();
-            } else {
-                ((BinaryMutex *)(unlockable_mutexes[i] & ~1))->unlock();
-            }
+    for (uint32_t i = 0; i < MAX_UNLOCKABLE_LOCKS; i++) {
+        if (unlockable_locks[i].ulock) {
+            unlockable_locks[i].ulock->unlock();
         }
     }
 #endif
@@ -665,38 +658,38 @@ int WorkerThreadPool::get_thread_index() {
     return singleton->thread_ids.has(tid) ? singleton->thread_ids[tid] : -1;
 }

+WorkerThreadPool::TaskID WorkerThreadPool::get_caller_task_id() {
+    int th_index = get_thread_index();
+    if (th_index != -1 && singleton->threads[th_index].current_task) {
+        return singleton->threads[th_index].current_task->self;
+    } else {
+        return INVALID_TASK_ID;
+    }
+}
+
 #ifdef THREADS_ENABLED
-uint32_t WorkerThreadPool::thread_enter_unlock_allowance_zone(Mutex *p_mutex) {
-    return _thread_enter_unlock_allowance_zone(p_mutex, false);
-}
-
-uint32_t WorkerThreadPool::thread_enter_unlock_allowance_zone(BinaryMutex *p_mutex) {
-    return _thread_enter_unlock_allowance_zone(p_mutex, true);
-}
-
-uint32_t WorkerThreadPool::_thread_enter_unlock_allowance_zone(void *p_mutex, bool p_is_binary) {
-    for (uint32_t i = 0; i < MAX_UNLOCKABLE_MUTEXES; i++) {
-        if (unlikely((unlockable_mutexes[i] & ~1) == (uintptr_t)p_mutex)) {
+uint32_t WorkerThreadPool::_thread_enter_unlock_allowance_zone(THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &p_ulock) {
+    for (uint32_t i = 0; i < MAX_UNLOCKABLE_LOCKS; i++) {
+        DEV_ASSERT((bool)unlockable_locks[i].ulock == (bool)unlockable_locks[i].rc);
+        if (unlockable_locks[i].ulock == &p_ulock) {
             // Already registered in the current thread.
-            return UINT32_MAX;
-        }
-        if (!unlockable_mutexes[i]) {
-            unlockable_mutexes[i] = (uintptr_t)p_mutex;
-            if (p_is_binary) {
-                unlockable_mutexes[i] |= 1;
-            }
+            unlockable_locks[i].rc++;
+            return i;
+        } else if (!unlockable_locks[i].ulock) {
+            unlockable_locks[i].ulock = &p_ulock;
+            unlockable_locks[i].rc = 1;
             return i;
         }
     }
-    ERR_FAIL_V_MSG(UINT32_MAX, "No more unlockable mutex slots available. Engine bug.");
+    ERR_FAIL_V_MSG(UINT32_MAX, "No more unlockable lock slots available. Engine bug.");
 }

 void WorkerThreadPool::thread_exit_unlock_allowance_zone(uint32_t p_zone_id) {
-    if (p_zone_id == UINT32_MAX) {
-        return;
+    DEV_ASSERT(unlockable_locks[p_zone_id].ulock && unlockable_locks[p_zone_id].rc);
+    unlockable_locks[p_zone_id].rc--;
+    if (unlockable_locks[p_zone_id].rc == 0) {
+        unlockable_locks[p_zone_id].ulock = nullptr;
     }
-    DEV_ASSERT(unlockable_mutexes[p_zone_id]);
-    unlockable_mutexes[p_zone_id] = 0;
 }
 #endif
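A short sketch of how the new get_caller_task_id() can be used to detect whether the current code is running inside a WorkerThreadPool task, which is essentially what ResourceLoader::load() now does to pick its threading mode. The function name do_something() is hypothetical; the WorkerThreadPool calls are the ones added in this PR.

#include "core/object/worker_thread_pool.h"

void do_something() {
    WorkerThreadPool::TaskID tid = WorkerThreadPool::get_singleton()->get_caller_task_id();
    if (tid != WorkerThreadPool::INVALID_TASK_ID) {
        // Running inside a pool task; avoid blocking waits that could starve the pool.
    } else {
        // Running on the main thread or a user thread.
    }
}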

core/object/worker_thread_pool.h

@@ -162,8 +162,12 @@
     static WorkerThreadPool *singleton;

 #ifdef THREADS_ENABLED
-    static const uint32_t MAX_UNLOCKABLE_MUTEXES = 2;
-    static thread_local uintptr_t unlockable_mutexes[MAX_UNLOCKABLE_MUTEXES];
+    static const uint32_t MAX_UNLOCKABLE_LOCKS = 2;
+    struct UnlockableLocks {
+        THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> *ulock = nullptr;
+        uint32_t rc = 0;
+    };
+    static thread_local UnlockableLocks unlockable_locks[MAX_UNLOCKABLE_LOCKS];
 #endif

     TaskID _add_task(const Callable &p_callable, void (*p_func)(void *), void *p_userdata, BaseTemplateUserdata *p_template_userdata, bool p_high_priority, const String &p_description);
@@ -192,7 +196,7 @@
     void _wait_collaboratively(ThreadData *p_caller_pool_thread, Task *p_task);

 #ifdef THREADS_ENABLED
-    static uint32_t _thread_enter_unlock_allowance_zone(void *p_mutex, bool p_is_binary);
+    static uint32_t _thread_enter_unlock_allowance_zone(THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &p_ulock);
 #endif

     void _lock_unlockable_mutexes();
@@ -239,13 +243,17 @@
     static WorkerThreadPool *get_singleton() { return singleton; }
     static int get_thread_index();
+    static TaskID get_caller_task_id();

 #ifdef THREADS_ENABLED
-    static uint32_t thread_enter_unlock_allowance_zone(Mutex *p_mutex);
-    static uint32_t thread_enter_unlock_allowance_zone(BinaryMutex *p_mutex);
+    _ALWAYS_INLINE_ static uint32_t thread_enter_unlock_allowance_zone(const MutexLock<BinaryMutex> &p_lock) { return _thread_enter_unlock_allowance_zone(p_lock._get_lock()); }
+    template <int Tag>
+    _ALWAYS_INLINE_ static uint32_t thread_enter_unlock_allowance_zone(const SafeBinaryMutex<Tag> &p_mutex) { return _thread_enter_unlock_allowance_zone(p_mutex._get_lock()); }
     static void thread_exit_unlock_allowance_zone(uint32_t p_zone_id);
 #else
-    static uint32_t thread_enter_unlock_allowance_zone(void *p_mutex) { return UINT32_MAX; }
+    static uint32_t thread_enter_unlock_allowance_zone(const MutexLock<BinaryMutex> &p_lock) { return UINT32_MAX; }
+    template <int Tag>
+    static uint32_t thread_enter_unlock_allowance_zone(const SafeBinaryMutex<Tag> &p_mutex) { return UINT32_MAX; }
     static void thread_exit_unlock_allowance_zone(uint32_t p_zone_id) {}
 #endif

core/os/condition_variable.h

@@ -32,6 +32,7 @@
 #define CONDITION_VARIABLE_H

 #include "core/os/mutex.h"
+#include "core/os/safe_binary_mutex.h"

 #ifdef THREADS_ENABLED
@@ -56,7 +57,12 @@ class ConditionVariable {
 public:
     template <typename BinaryMutexT>
     _ALWAYS_INLINE_ void wait(const MutexLock<BinaryMutexT> &p_lock) const {
-        condition.wait(const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(p_lock.lock));
+        condition.wait(const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(p_lock._get_lock()));
+    }
+
+    template <int Tag>
+    _ALWAYS_INLINE_ void wait(const MutexLock<SafeBinaryMutex<Tag>> &p_lock) const {
+        condition.wait(const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(p_lock.mutex._get_lock()));
     }

     _ALWAYS_INLINE_ void notify_one() const {

core/os/mutex.h

@@ -72,13 +72,18 @@
 template <typename MutexT>
 class MutexLock {
-    friend class ConditionVariable;
-
     THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;

 public:
     explicit MutexLock(const MutexT &p_mutex) :
             lock(p_mutex.mutex) {}
+
+    // Clarification: all the funny syntax is needed so this function exists only for binary mutexes.
+    template <typename T = MutexT>
+    _ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &_get_lock(
+            typename std::enable_if<std::is_same<T, THREADING_NAMESPACE::mutex>::value> * = nullptr) const {
+        return const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(lock);
+    }
 };

 using Mutex = MutexImpl<THREADING_NAMESPACE::recursive_mutex>; // Recursive, for general use

core/os/safe_binary_mutex.h

@@ -47,76 +47,76 @@
 // Also, don't forget to declare the thread_local variable on each use.
 template <int Tag>
 class SafeBinaryMutex {
-    friend class MutexLock<SafeBinaryMutex>;
+    friend class MutexLock<SafeBinaryMutex<Tag>>;

     using StdMutexType = THREADING_NAMESPACE::mutex;

     mutable THREADING_NAMESPACE::mutex mutex;
-    static thread_local uint32_t count;
+
+    struct TLSData {
+        mutable THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> lock;
+        uint32_t count = 0;
+
+        TLSData(SafeBinaryMutex<Tag> &p_mutex) :
+                lock(p_mutex.mutex, THREADING_NAMESPACE::defer_lock) {}
+    };
+    static thread_local TLSData tls_data;

 public:
     _ALWAYS_INLINE_ void lock() const {
-        if (++count == 1) {
-            mutex.lock();
+        if (++tls_data.count == 1) {
+            tls_data.lock.lock();
         }
     }

     _ALWAYS_INLINE_ void unlock() const {
-        DEV_ASSERT(count);
-        if (--count == 0) {
-            mutex.unlock();
+        DEV_ASSERT(tls_data.count);
+        if (--tls_data.count == 0) {
+            tls_data.lock.unlock();
         }
     }

-    _ALWAYS_INLINE_ bool try_lock() const {
-        if (count) {
-            count++;
-            return true;
-        } else {
-            if (mutex.try_lock()) {
-                count++;
-                return true;
-            } else {
-                return false;
-            }
-        }
+    _ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &_get_lock() const {
+        return const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(tls_data.lock);
     }

-    ~SafeBinaryMutex() {
-        DEV_ASSERT(!count);
+    _ALWAYS_INLINE_ SafeBinaryMutex() {
+    }
+
+    _ALWAYS_INLINE_ ~SafeBinaryMutex() {
+        DEV_ASSERT(!tls_data.count);
     }
 };

+// This specialization is needed so manual locking and MutexLock can be used
+// at the same time on a SafeBinaryMutex.
 template <int Tag>
 class MutexLock<SafeBinaryMutex<Tag>> {
     friend class ConditionVariable;

-    THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> lock;
+    const SafeBinaryMutex<Tag> &mutex;

 public:
-    _ALWAYS_INLINE_ explicit MutexLock(const SafeBinaryMutex<Tag> &p_mutex) :
-            lock(p_mutex.mutex) {
-        SafeBinaryMutex<Tag>::count++;
-    };
-    _ALWAYS_INLINE_ ~MutexLock() {
-        SafeBinaryMutex<Tag>::count--;
-    };
+    explicit MutexLock(const SafeBinaryMutex<Tag> &p_mutex) :
+            mutex(p_mutex) {
+        mutex.lock();
+    }
+
+    ~MutexLock() {
+        mutex.unlock();
+    }
 };

 #else // No threads.

 template <int Tag>
-class SafeBinaryMutex : public MutexImpl {
-    static thread_local uint32_t count;
-};
+class SafeBinaryMutex {
+    struct TLSData {
+        TLSData(SafeBinaryMutex<Tag> &p_mutex) {}
+    };
+    static thread_local TLSData tls_data;

-template <int Tag>
-class MutexLock<SafeBinaryMutex<Tag>> {
 public:
-    MutexLock(const SafeBinaryMutex<Tag> &p_mutex) {}
-    ~MutexLock() {}
+    void lock() const {}
+    void unlock() const {}
 };

 #endif // THREADS_ENABLED
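As the header's own comment says, every user of SafeBinaryMutex must still declare the thread_local variable; with this PR that now means one per-tag TLSData definition bound to the mutex. A sketch of a hypothetical subsystem adopting it, mirroring exactly what the PR does for ResourceLoader (tag 1) and GDScriptCache (tag 2); the tag value 3 and all example_* names are hypothetical.

#include "core/os/safe_binary_mutex.h"

static const int EXAMPLE_MUTEX_TAG = 3; // Hypothetical; tags 1 and 2 are used by ResourceLoader and GDScriptCache.
static SafeBinaryMutex<EXAMPLE_MUTEX_TAG> example_mutex;

SafeBinaryMutex<EXAMPLE_MUTEX_TAG> &_get_example_mutex() {
    return example_mutex;
}

// The per-tag thread_local TLSData definition must live in exactly one .cpp file.
template <>
thread_local SafeBinaryMutex<EXAMPLE_MUTEX_TAG>::TLSData SafeBinaryMutex<EXAMPLE_MUTEX_TAG>::tls_data(_get_example_mutex());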

core/templates/command_queue_mt.cpp

@@ -33,14 +33,6 @@
 #include "core/config/project_settings.h"
 #include "core/os/os.h"

-void CommandQueueMT::lock() {
-    mutex.lock();
-}
-
-void CommandQueueMT::unlock() {
-    mutex.unlock();
-}
-
 CommandQueueMT::CommandQueueMT() {
     command_mem.reserve(DEFAULT_COMMAND_MEM_SIZE_KB * 1024);
 }

core/templates/command_queue_mt.h

@@ -362,23 +362,24 @@ class CommandQueueMT {
             return;
         }

-        lock();
+        MutexLock lock(mutex);

-        uint32_t allowance_id = WorkerThreadPool::thread_enter_unlock_allowance_zone(&mutex);
         while (flush_read_ptr < command_mem.size()) {
             uint64_t size = *(uint64_t *)&command_mem[flush_read_ptr];
             flush_read_ptr += 8;
             CommandBase *cmd = reinterpret_cast<CommandBase *>(&command_mem[flush_read_ptr]);
+            uint32_t allowance_id = WorkerThreadPool::thread_enter_unlock_allowance_zone(lock);
             cmd->call();
+            WorkerThreadPool::thread_exit_unlock_allowance_zone(allowance_id);

             // Handle potential realloc due to the command and unlock allowance.
             cmd = reinterpret_cast<CommandBase *>(&command_mem[flush_read_ptr]);

             if (unlikely(cmd->sync)) {
                 sync_head++;
-                unlock(); // Give an opportunity to awaiters right away.
+                lock.~MutexLock(); // Give an opportunity to awaiters right away.
                 sync_cond_var.notify_all();
-                lock();
+                new (&lock) MutexLock(mutex);
                 // Handle potential realloc happened during unlock.
                 cmd = reinterpret_cast<CommandBase *>(&command_mem[flush_read_ptr]);
             }
@@ -387,14 +388,11 @@
             flush_read_ptr += size;
         }

-        WorkerThreadPool::thread_exit_unlock_allowance_zone(allowance_id);
-
         command_mem.clear();
         flush_read_ptr = 0;

         _prevent_sync_wraparound();
-
-        unlock();
     }

     _FORCE_INLINE_ void _wait_for_sync(MutexLock<BinaryMutex> &p_lock) {
@@ -410,9 +408,6 @@
     void _no_op() {}

 public:
-    void lock();
-    void unlock();
-
     /* NORMAL PUSH COMMANDS */
     DECL_PUSH(0)
     SPACE_SEP_LIST(DECL_PUSH, 15)
@@ -446,9 +441,8 @@
     }

     void set_pump_task_id(WorkerThreadPool::TaskID p_task_id) {
-        lock();
+        MutexLock lock(mutex);
         pump_task_id = p_task_id;
-        unlock();
     }

     CommandQueueMT();
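A sketch of the reworked allowance-zone pattern used in flush_if_pending() above: the zone is now entered with the lock object itself rather than a raw mutex pointer, so the pool can release exactly that lock while the current thread waits on a pool task. Names other than the MutexLock/WorkerThreadPool API are hypothetical.

#include "core/object/worker_thread_pool.h"
#include "core/os/mutex.h"

BinaryMutex example_mutex;

void example_flush() {
    MutexLock lock(example_mutex);
    uint32_t allowance_id = WorkerThreadPool::thread_enter_unlock_allowance_zone(lock);
    // ... call code that may internally wait on a WorkerThreadPool task ...
    WorkerThreadPool::thread_exit_unlock_allowance_zone(allowance_id);
}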

modules/gdscript/gdscript_cache.cpp

@@ -144,6 +144,14 @@ GDScriptParserRef::~GDScriptParserRef() {

 GDScriptCache *GDScriptCache::singleton = nullptr;

+SafeBinaryMutex<GDScriptCache::BINARY_MUTEX_TAG> &_get_gdscript_cache_mutex() {
+    return GDScriptCache::mutex;
+}
+
+template <>
+thread_local SafeBinaryMutex<GDScriptCache::BINARY_MUTEX_TAG>::TLSData SafeBinaryMutex<GDScriptCache::BINARY_MUTEX_TAG>::tls_data(_get_gdscript_cache_mutex());
+
+SafeBinaryMutex<GDScriptCache::BINARY_MUTEX_TAG> GDScriptCache::mutex;
+
 void GDScriptCache::move_script(const String &p_from, const String &p_to) {
     if (singleton == nullptr || p_from == p_to) {
         return;
@@ -369,7 +377,7 @@ Ref<GDScript> GDScriptCache::get_full_script(const String &p_path, Error &r_erro
     // Allowing lifting the lock might cause a script to be reloaded multiple times,
     // which, as a last resort deadlock prevention strategy, is a good tradeoff.

-    uint32_t allowance_id = WorkerThreadPool::thread_enter_unlock_allowance_zone(&singleton->mutex);
+    uint32_t allowance_id = WorkerThreadPool::thread_enter_unlock_allowance_zone(singleton->mutex);
     r_error = script->reload(true);
     WorkerThreadPool::thread_exit_unlock_allowance_zone(allowance_id);
     if (r_error) {

modules/gdscript/gdscript_cache.h

@@ -34,7 +34,7 @@
 #include "gdscript.h"

 #include "core/object/ref_counted.h"
-#include "core/os/mutex.h"
+#include "core/os/safe_binary_mutex.h"
 #include "core/templates/hash_map.h"
 #include "core/templates/hash_set.h"
@@ -95,7 +95,12 @@ class GDScriptCache {
     bool cleared = false;

-    Mutex mutex;
+public:
+    static const int BINARY_MUTEX_TAG = 2;
+
+private:
+    static SafeBinaryMutex<BINARY_MUTEX_TAG> mutex;
+    friend SafeBinaryMutex<BINARY_MUTEX_TAG> &_get_gdscript_cache_mutex();

 public:
     static void move_script(const String &p_from, const String &p_to);