-
Notifications
You must be signed in to change notification settings - Fork 13.4k
Add a jobserver proxy to ensure at least one token is always held #140145
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,11 +1,12 @@ | ||
use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; | ||
use std::path::{Path, PathBuf}; | ||
use std::sync::OnceLock; | ||
use std::sync::atomic::{AtomicBool, Ordering}; | ||
use std::sync::{Arc, OnceLock}; | ||
use std::{env, iter, thread}; | ||
|
||
use rustc_ast as ast; | ||
use rustc_codegen_ssa::traits::CodegenBackend; | ||
use rustc_data_structures::jobserver::Proxy; | ||
use rustc_data_structures::sync; | ||
use rustc_metadata::{DylibError, load_symbol_from_dylib}; | ||
use rustc_middle::ty::CurrentGcx; | ||
|
@@ -113,7 +114,7 @@ fn init_stack_size(early_dcx: &EarlyDiagCtxt) -> usize { | |
}) | ||
} | ||
|
||
fn run_in_thread_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>( | ||
fn run_in_thread_with_globals<F: FnOnce(CurrentGcx, Arc<Proxy>) -> R + Send, R: Send>( | ||
thread_stack_size: usize, | ||
edition: Edition, | ||
sm_inputs: SourceMapInputs, | ||
|
@@ -139,7 +140,7 @@ fn run_in_thread_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>( | |
edition, | ||
extra_symbols, | ||
Some(sm_inputs), | ||
|| f(CurrentGcx::new()), | ||
|| f(CurrentGcx::new(), Proxy::new()), | ||
) | ||
}) | ||
.unwrap() | ||
|
@@ -152,7 +153,10 @@ fn run_in_thread_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>( | |
}) | ||
} | ||
|
||
pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, R: Send>( | ||
pub(crate) fn run_in_thread_pool_with_globals< | ||
F: FnOnce(CurrentGcx, Arc<Proxy>) -> R + Send, | ||
R: Send, | ||
>( | ||
thread_builder_diag: &EarlyDiagCtxt, | ||
edition: Edition, | ||
threads: usize, | ||
|
@@ -162,8 +166,8 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, | |
) -> R { | ||
use std::process; | ||
|
||
use rustc_data_structures::defer; | ||
use rustc_data_structures::sync::FromDyn; | ||
use rustc_data_structures::{defer, jobserver}; | ||
use rustc_middle::ty::tls; | ||
use rustc_query_impl::QueryCtxt; | ||
use rustc_query_system::query::{QueryContext, break_query_cycles}; | ||
|
@@ -178,22 +182,26 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, | |
edition, | ||
sm_inputs, | ||
extra_symbols, | ||
|current_gcx| { | ||
|current_gcx, jobserver_proxy| { | ||
// Register the thread for use with the `WorkerLocal` type. | ||
registry.register(); | ||
|
||
f(current_gcx) | ||
f(current_gcx, jobserver_proxy) | ||
}, | ||
); | ||
} | ||
|
||
let current_gcx = FromDyn::from(CurrentGcx::new()); | ||
let current_gcx2 = current_gcx.clone(); | ||
|
||
let proxy = Proxy::new(); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

Hang on, won't this somewhat misbehave if you run multiple compiler sessions inside the same process? They would each think they have an implicit token. There is a reason [comment truncated in extraction].

There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

If you run them in parallel yes, but you should already be acquiring an extra token per rustc instance if you're using a jobserver in that case.

There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

So you would need to use a separate proxy instance for acquiring the extra token per rustc instance?

There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

No, you'd use [comment truncated in extraction].

There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

If you do it like the following you would deadlock with a jobserver that has 1 token available total:

    for req in requests_stream {
        let token = acquire_token();
        spawn_rustc_instance(token);
    }

You would have to do something like:

    let mut first_request = true;
    for req in requests_stream {
        let token = if first_request {
            first_request = false;
            None
        } else {
            Some(acquire_token())
        };
        spawn_rustc_instance(token);
    }

which is harder than making [comment truncated in extraction].

There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

We could allow a [comment truncated in extraction]. |
||
|
||
let proxy_ = Arc::clone(&proxy); | ||
let proxy__ = Arc::clone(&proxy); | ||
let builder = rayon_core::ThreadPoolBuilder::new() | ||
.thread_name(|_| "rustc".to_string()) | ||
.acquire_thread_handler(jobserver::acquire_thread) | ||
.release_thread_handler(jobserver::release_thread) | ||
.acquire_thread_handler(move || proxy_.acquire_thread()) | ||
.release_thread_handler(move || proxy__.release_thread()) | ||
bjorn3 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
.num_threads(threads) | ||
.deadlock_handler(move || { | ||
// On deadlock, creates a new thread and forwards information in thread | ||
|
@@ -257,7 +265,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce(CurrentGcx) -> R + Send, | |
}, | ||
// Run `f` on the first thread in the thread pool. | ||
move |pool: &rayon_core::ThreadPool| { | ||
pool.install(|| f(current_gcx.into_inner())) | ||
pool.install(|| f(current_gcx.into_inner(), proxy)) | ||
}, | ||
) | ||
.unwrap() | ||
|
Uh oh!
There was an error while loading. Please reload this page.