
Commit bd8bff3

fix nits
1 parent bf04343 commit bd8bff3

5 files changed (+91 / -65 lines)

compiler/rustc_data_structures/src/sharded.rs (+10 / -5)

@@ -76,11 +76,7 @@ impl<T: Default> Sharded<T> {
     }
 
     #[inline]
-    pub fn with_get_shard_by_hash<F: FnOnce(&mut T) -> R, R>(
-        &self,
-        hash: u64,
-        f: F,
-    ) -> R {
+    pub fn with_get_shard_by_hash<F: FnOnce(&mut T) -> R, R>(&self, hash: u64, f: F) -> R {
         if likely(self.single_thread) {
             let shard = &self.shard;
             assert!(!shard.borrow.replace(true));
@@ -92,6 +88,15 @@ impl<T: Default> Sharded<T> {
         }
     }
 
+    #[inline]
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
+        if likely(self.single_thread) {
+            &self.shard
+        } else {
+            &self.shards[get_shard_index_by_hash(make_hash(val))].0
+        }
+    }
+
     #[inline]
     pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
         if likely(self.single_thread) {
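The new `get_shard_by_value` follows the same fast path as the surrounding methods: in a single-threaded session there is only one shard, so the key never has to be hashed and `make_hash`/`get_shard_index_by_hash` are skipped entirely. Below is a minimal, self-contained sketch of that idea using `std::sync::Mutex` and the standard hasher in place of rustc's `Lock`, `likely`, and hashing helpers; every name in it is illustrative, not rustc's actual API.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 32;

/// Simplified model of a sharded lock: one shard when single-threaded,
/// a hash-selected shard otherwise.
struct Sharded<T> {
    single_thread: bool,
    shard: Mutex<T>,       // fast path: the only shard in single-threaded mode
    shards: Vec<Mutex<T>>, // used when running with multiple threads
}

impl<T: Default> Sharded<T> {
    fn new(single_thread: bool) -> Self {
        Sharded {
            single_thread,
            shard: Mutex::new(T::default()),
            shards: (0..SHARDS).map(|_| Mutex::new(T::default())).collect(),
        }
    }

    /// Pick the shard for `val`; single-threaded sessions skip hashing entirely.
    fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Mutex<T> {
        if self.single_thread {
            &self.shard
        } else {
            let mut hasher = DefaultHasher::new();
            val.hash(&mut hasher);
            &self.shards[hasher.finish() as usize % SHARDS]
        }
    }
}

fn main() {
    let map: Sharded<Vec<u32>> = Sharded::new(true);
    map.get_shard_by_value("some-key").lock().unwrap().push(1);
    assert_eq!(map.shard.lock().unwrap().len(), 1);
}
```

The point of the single-thread branch is that choosing a shard becomes a constant-time field access instead of a hash computation plus an index.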

compiler/rustc_errors/src/lib.rs (+1 / -6)

@@ -639,12 +639,7 @@ impl Handler {
     ) -> String {
         let inner = self.inner.borrow();
         let args = crate::translation::to_fluent_args(args);
-        inner
-            .emitter
-            .translate_message(&message, &args)
-            .map_err(Report::new)
-            .unwrap()
-            .to_string()
+        inner.emitter.translate_message(&message, &args).map_err(Report::new).unwrap().to_string()
     }
 
     // This is here to not allow mutation of flags;

compiler/rustc_query_system/src/query/plumbing.rs (+73 / -53)

@@ -13,24 +13,22 @@ use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::values::Value;
 use crate::HandleCycleError;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
-use rustc_data_structures::profiling::TimingGuard;
 use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
-
 use rustc_data_structures::sync::Lock;
+
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
-use rustc_session::Session;use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
+use std::ops::DerefMut;
 use thin_vec::ThinVec;
 
 use super::QueryConfig;
@@ -226,7 +224,6 @@ where
 
 #[cold]
 #[inline(never)]
-#[cfg(not(parallel_compiler))]
 fn cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
@@ -296,7 +293,10 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    /*// For the parallel compiler we need to check both the query cache and query state structures
+    let mut state_lock = state.active.get_shard_by_value(&key).lock();
+    let lock = state_lock.deref_mut();
+
+    // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
     // query is not still executing. Without checking the query cache here, we can end up
     // re-executing the query since `try_start` only checks that the query is not currently
@@ -307,35 +307,44 @@ where
             qcx.dep_context().profiler().query_cache_hit(index.into());
             return (value, Some(index));
         }
-    }*/
-
-    let job = state.active.with_get_shard_by_value(&key, |state_lock| {
-        JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, state_lock, span, key)
-    });
-
-    match job {
-        TryGetJob::NotYetStarted(job) => {
-            let (result, dep_node_index) = execute_job(query, qcx, key.clone(), dep_node, job.id);
-            let cache = query.query_cache(qcx);
-            if query.feedable() {
-                // We should not compute queries that also got a value via feeding.
-                // This can't happen, as query feeding adds the very dependencies to the fed query
-                // as its feeding query had. So if the fed query is red, so is its feeder, which will
-                // get evaluated first, and re-feed the query.
-                if let Some((cached_result, _)) = cache.lookup(&key) {
-                    panic!(
-                        "fed query later has its value computed. The already cached value: {cached_result:?}"
-                    );
-
-
+    }
 
+    let current_job_id = qcx.current_query_job();
 
+    match lock.entry(key) {
+        Entry::Vacant(entry) => {
+            // Nothing has computed or is computing the query, so we start a new job and insert it in the
+            // state map.
+            let id = qcx.next_job_id();
+            let job = QueryJob::new(id, span, current_job_id);
+            entry.insert(QueryResult::Started(job));
 
+            // Drop the lock before we start executing the query
+            drop(state_lock);
 
+            execute_job(query, qcx, state, key, id, dep_node)
+        }
+        Entry::Occupied(mut entry) => {
+            match entry.get_mut() {
+                #[cfg(not(parallel_compiler))]
+                QueryResult::Started(job) => {
+                    let id = job.id;
+                    drop(state_lock);
 
+                    // If we are single-threaded we know that we have cycle error,
+                    // so we just return the error.
+                    cycle_error(query, qcx, id, span)
                 }
                 #[cfg(parallel_compiler)]
                 QueryResult::Started(job) => {
+                    if std::intrinsics::likely(!rustc_data_structures::sync::active()) {
+                        let id = job.id;
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have cycle error,
+                        // so we just return the error.
+                        return cycle_error(query, qcx, id, span);
+                    }
                     // Get the latch out
                     let latch = job.latch();
                     drop(state_lock);
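The new body of `try_execute_query` above replaces the old `with_get_shard_by_value`/`TryGetJob` dance with a single lock acquisition and a match on the map entry: a vacant entry means this invocation starts the job (and drops the lock before executing), while an occupied `Started` entry on a single thread can only mean a query cycle. Here is a stripped-down model of that claim-then-release pattern, using a plain `Mutex<HashMap<..>>` instead of rustc's sharded query state; the key type, job ids, and `run_query` below are illustrative only. The diff continues below with the extracted `execute_job`.

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::Mutex;

enum QueryResult {
    Started(u64), // id of the job currently computing this key
    Poisoned,     // the computing job panicked
}

struct QueryState {
    active: Mutex<HashMap<String, QueryResult>>,
}

fn try_execute_query(state: &QueryState, key: &str, next_job_id: u64) -> Result<String, String> {
    let mut lock = state.active.lock().unwrap();

    match lock.entry(key.to_owned()) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query: record a started
            // job, then drop the lock before doing the actual work.
            entry.insert(QueryResult::Started(next_job_id));
            drop(lock);
            Ok(run_query(key))
        }
        Entry::Occupied(entry) => match entry.get() {
            // Single-threaded model: re-entering an active key is a cycle.
            QueryResult::Started(id) => Err(format!("cycle while computing {key:?} (job {id})")),
            QueryResult::Poisoned => panic!("query {key:?} was poisoned by an earlier panic"),
        },
    }
}

fn run_query(key: &str) -> String {
    format!("value for {key}")
}

fn main() {
    let state = QueryState { active: Mutex::new(HashMap::new()) };
    println!("{:?}", try_execute_query(&state, "layout_of", 1));
    // The key is still marked active, so asking again reports a cycle.
    println!("{:?}", try_execute_query(&state, "layout_of", 2));
}
```

Dropping the lock before running the query is the important design choice: the map only guards job bookkeeping, never the (potentially long) computation itself.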
@@ -345,30 +354,41 @@ where
                 QueryResult::Poisoned => FatalError.raise(),
             }
         }
-        TryGetJob::Cycle(error) => {
-            let result = mk_cycle(qcx, error, query.handle_cycle_error());
-            (result, None)
+    }
+}
+
+#[inline(always)]
+fn execute_job<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    state: &QueryState<Q::Key, Qcx::DepKind>,
+    key: Q::Key,
+    id: QueryJobId,
+    dep_node: Option<DepNode<Qcx::DepKind>>,
+) -> (Q::Value, Option<DepNodeIndex>)
+where
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
+{
+    // Use `JobOwner` so the query will be poisoned if executing it panics.
+    let job_owner = JobOwner { state, key };
+
+    let (result, dep_node_index) = match qcx.dep_context().dep_graph().data() {
+        None => execute_job_non_incr(query, qcx, key, id),
+        Some(data) => execute_job_incr(query, qcx, data, key, dep_node, id),
+    };
+
+    let cache = query.query_cache(qcx);
+    if query.feedable() {
+        // We should not compute queries that also got a value via feeding.
+        // This can't happen, as query feeding adds the very dependencies to the fed query
+        // as its feeding query had. So if the fed query is red, so is its feeder, which will
+        // get evaluated first, and re-feed the query.
+        if let Some((cached_result, _)) = cache.lookup(&key) {
+            panic!(
+                "fed query later has its value computed. The already cached value: {cached_result:?}"
+            );
         }
-        #[cfg(parallel_compiler)]
-        TryGetJob::JobWait(current_job_id, query_blocked_prof_timer, latch) => {
-            // With parallel queries we might just have to wait on some other
-            // thread.
-            let result = latch.wait_on(current_job_id, span);
-            match result {
-                Ok(()) => {
-                    let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
-                        panic!("value must be in cache after waiting")
-                    };
-                    qcx.dep_context().profiler().query_cache_hit(index.into());
-                    query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
-
-                    (v, Some(index))
-                }
-                Err(error) => {
-                    let result = mk_cycle(qcx, error, query.handle_cycle_error());
-                    (result, None)
-                }
-            } }
     }
     job_owner.complete(cache, result, dep_node_index);
 

src/bootstrap/config.rs (+1 / -1)

@@ -1126,7 +1126,7 @@ impl Config {
         set(&mut config.use_lld, rust.use_lld);
         set(&mut config.lld_enabled, rust.lld);
         set(&mut config.llvm_tools_enabled, rust.llvm_tools);
-        config.rustc_parallel = rust.parallel_compiler.unwrap_or(false);
+        config.rustc_parallel = rust.parallel_compiler.unwrap_or(true);
         config.rustc_default_linker = rust.default_linker;
         config.musl_root = rust.musl_root.map(PathBuf::from);
         config.save_toolstates = rust.save_toolstates.map(PathBuf::from);

src/librustdoc/clean/utils.rs (+6 / -0)

@@ -470,6 +470,12 @@ pub(crate) fn get_auto_trait_and_blanket_impls(
     cx: &mut DocContext<'_>,
     item_def_id: DefId,
 ) -> impl Iterator<Item = Item> {
+    // FIXME: To be removed once `parallel_compiler` bugs are fixed!
+    // More information in <https://github.com/rust-lang/rust/pull/106930>.
+    if cfg!(parallel_compiler) {
+        return vec![].into_iter().chain(vec![].into_iter());
+    }
+
     let auto_impls = cx
         .sess()
         .prof
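The early return added to `get_auto_trait_and_blanket_impls` is written as `vec![].into_iter().chain(vec![].into_iter())` rather than, say, `std::iter::empty()` because the function returns `impl Iterator<Item = Item>`: every return path must produce the same concrete type, and the normal path ends in exactly such a `Chain` of two `vec::IntoIter`s. A small standalone illustration of that constraint (the function below is invented for the example and unrelated to rustdoc):

```rust
// With `impl Trait` in return position, every `return` must yield the same
// concrete type, so an early-out has to mirror the shape of the main path.
fn evens_then_odds(skip: bool, upto: u32) -> impl Iterator<Item = u32> {
    if skip {
        // `std::iter::empty()` would be a different concrete type than the
        // `Chain` built below and would fail to compile.
        return vec![].into_iter().chain(vec![].into_iter());
    }
    let evens: Vec<u32> = (0..upto).filter(|n| n % 2 == 0).collect();
    let odds: Vec<u32> = (0..upto).filter(|n| n % 2 == 1).collect();
    evens.into_iter().chain(odds.into_iter())
}

fn main() {
    assert_eq!(evens_then_odds(true, 10).count(), 0);
    assert_eq!(evens_then_odds(false, 10).count(), 10);
}
```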

0 commit comments
