diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index 8fec629161eac..bdaed5bb42ee1 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -353,16 +353,16 @@ pub fn run_compiler(at_args: &[String], callbacks: &mut (dyn Callbacks + Send)) }) } -fn dump_feature_usage_metrics(tcxt: TyCtxt<'_>, metrics_dir: &Path) { - let hash = tcxt.crate_hash(LOCAL_CRATE); - let crate_name = tcxt.crate_name(LOCAL_CRATE); +fn dump_feature_usage_metrics(tcx: TyCtxt<'_>, metrics_dir: &Path) { + let hash = tcx.crate_hash(LOCAL_CRATE); + let crate_name = tcx.crate_name(LOCAL_CRATE); let metrics_file_name = format!("unstable_feature_usage_metrics-{crate_name}-{hash}.json"); let metrics_path = metrics_dir.join(metrics_file_name); - if let Err(error) = tcxt.features().dump_feature_usage_metrics(metrics_path) { + if let Err(error) = tcx.features().dump_feature_usage_metrics(metrics_path) { // FIXME(yaahc): once metrics can be enabled by default we will want "failure to emit // default metrics" to only produce a warning when metrics are enabled by default and emit // an error only when the user manually enables metrics - tcxt.dcx().emit_err(UnstableFeatureUsage { error }); + tcx.dcx().emit_err(UnstableFeatureUsage { error }); } } diff --git a/compiler/rustc_interface/src/limits.rs b/compiler/rustc_interface/src/limits.rs index e0fc91f3b723b..8ae0743886ce5 100644 --- a/compiler/rustc_interface/src/limits.rs +++ b/compiler/rustc_interface/src/limits.rs @@ -11,13 +11,13 @@ use rustc_hir::limit::Limit; use rustc_hir::{Attribute, find_attr}; use rustc_middle::query::Providers; -use rustc_session::Limits; +use rustc_session::{Limits, Session}; pub(crate) fn provide(providers: &mut Providers) { providers.limits = |tcx, ()| { let attrs = tcx.hir_krate_attrs(); Limits { - recursion_limit: get_recursion_limit(tcx.hir_krate_attrs()), + recursion_limit: get_recursion_limit(tcx.hir_krate_attrs(), tcx.sess), 
move_size_limit: find_attr!(attrs, MoveSizeLimit { limit, .. } => *limit) .unwrap_or(Limit::new(tcx.sess.opts.unstable_opts.move_size_limit.unwrap_or(0))), type_length_limit: find_attr!(attrs, TypeLengthLimit { limit, .. } => *limit) @@ -30,6 +30,13 @@ pub(crate) fn provide(providers: &mut Providers) { } // This one is separate because it must be read prior to macro expansion. -pub(crate) fn get_recursion_limit(attrs: &[Attribute]) -> Limit { - find_attr!(attrs, RecursionLimit { limit, .. } => *limit).unwrap_or(Limit::new(128)) +pub(crate) fn get_recursion_limit(attrs: &[Attribute], sess: &Session) -> Limit { + let limit_from_crate = + find_attr!(attrs, RecursionLimit { limit, .. } => limit.0).unwrap_or(128); + Limit::new( + sess.opts + .unstable_opts + .min_recursion_limit + .map_or(limit_from_crate, |min| min.max(limit_from_crate)), + ) } diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index a280b2a2a6bf7..5cf4b1546320f 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -1429,5 +1429,5 @@ fn get_recursion_limit(krate_attrs: &[ast::Attribute], sess: &Session) -> Limit // So, no lints here to avoid duplicates. 
ShouldEmit::EarlyFatal { also_emit_lints: false }, ); - crate::limits::get_recursion_limit(attr.as_slice()) + crate::limits::get_recursion_limit(attr.as_slice(), sess) } diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs index 88056a0db966d..6c2924dfe9bd8 100644 --- a/compiler/rustc_interface/src/tests.rs +++ b/compiler/rustc_interface/src/tests.rs @@ -822,6 +822,7 @@ fn test_unstable_options_tracking_hash() { tracked!(maximal_hir_to_mir_coverage, true); tracked!(merge_functions, Some(MergeFunctions::Disabled)); tracked!(min_function_alignment, Some(Align::EIGHT)); + tracked!(min_recursion_limit, Some(256)); tracked!(mir_emit_retag, true); tracked!(mir_enable_passes, vec![("DestProp".to_string(), false)]); tracked!(mir_opt_level, Some(4)); diff --git a/compiler/rustc_middle/src/dep_graph/graph.rs b/compiler/rustc_middle/src/dep_graph/graph.rs index 963d5f1a53bdd..d410d9a48cd10 100644 --- a/compiler/rustc_middle/src/dep_graph/graph.rs +++ b/compiler/rustc_middle/src/dep_graph/graph.rs @@ -292,7 +292,7 @@ impl DepGraph { pub fn with_anon_task<'tcx, OP, R>( &self, - cx: TyCtxt<'tcx>, + tcx: TyCtxt<'tcx>, dep_kind: DepKind, op: OP, ) -> (R, DepNodeIndex) @@ -301,7 +301,7 @@ impl DepGraph { { match self.data() { Some(data) => { - let (result, index) = data.with_anon_task_inner(cx, dep_kind, op); + let (result, index) = data.with_anon_task_inner(tcx, dep_kind, op); self.read_index(index); (result, index) } @@ -379,14 +379,14 @@ impl DepGraphData { /// how to make that work with `anon` in `execute_job_incr`, though. pub fn with_anon_task_inner<'tcx, OP, R>( &self, - cx: TyCtxt<'tcx>, + tcx: TyCtxt<'tcx>, dep_kind: DepKind, op: OP, ) -> (R, DepNodeIndex) where OP: FnOnce() -> R, { - debug_assert!(!cx.is_eval_always(dep_kind)); + debug_assert!(!tcx.is_eval_always(dep_kind)); // Large numbers of reads are common enough here that pre-sizing `read_set` // to 128 actually helps perf on some benchmarks. 
@@ -865,7 +865,7 @@ impl DepGraph { dep_node_debug.borrow_mut().insert(dep_node, debug_str); } - pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option { + pub(crate) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option { self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned() } @@ -1103,7 +1103,7 @@ impl DepGraph { } } - pub fn finish_encoding(&self) -> FileEncodeResult { + pub(crate) fn finish_encoding(&self) -> FileEncodeResult { if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) } } diff --git a/compiler/rustc_middle/src/queries.rs b/compiler/rustc_middle/src/queries.rs index 25ac06a6ed099..d4d9b9c0189d9 100644 --- a/compiler/rustc_middle/src/queries.rs +++ b/compiler/rustc_middle/src/queries.rs @@ -23,26 +23,10 @@ //! ## Query Modifiers //! //! Query modifiers are special flags that alter the behavior of a query. They are parsed and processed by the `rustc_macros` -//! The main modifiers are: //! -//! - `desc { ... }`: Sets the human-readable description for diagnostics and profiling. Required -//! for every query. The block should contain a `format!`-style string literal followed by -//! optional arguments. The query key identifier is available for use within the block, as is -//! `tcx`. -//! - `arena_cache`: Use an arena for in-memory caching of the query result. -//! - `cache_on_disk_if { ... }`: Cache the query result to disk if the provided block evaluates to -//! true. The query key identifier is available for use within the block, as is `tcx`. -//! - `cycle_delay_bug`: If a dependency cycle is detected, emit a delayed bug instead of aborting immediately. -//! - `no_hash`: Do not hash the query result for incremental compilation; just mark as dirty if recomputed. -//! - `anon`: Make the query anonymous in the dependency graph (no dep node is created). -//! - `eval_always`: Always evaluate the query, ignoring its dependencies and cached results. -//! 
- `depth_limit`: Impose a recursion depth limit on the query to prevent stack overflows. -//! - `separate_provide_extern`: Use separate provider functions for local and external crates. -//! - `feedable`: Allow the query result to be set from another query ("fed" externally). +//! For the list of modifiers, see [`rustc_middle::query::modifiers`]. //! -//! For the up-to-date list, see the `QueryModifiers` struct in -//! [`rustc_macros/src/query.rs`](https://github.com/rust-lang/rust/blob/HEAD/compiler/rustc_macros/src/query.rs) -//! and for more details in incremental compilation, see the +//! For more details on incremental compilation, see the //! [Query modifiers in incremental compilation](https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation-in-detail.html#query-modifiers) section of the rustc-dev-guide. //! //! ## Query Expansion and Code Generation diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs index fc96566b069a1..65c6b7551bc63 100644 --- a/compiler/rustc_middle/src/query/erase.rs +++ b/compiler/rustc_middle/src/query/erase.rs @@ -15,7 +15,6 @@ use rustc_span::{ErrorGuaranteed, Spanned}; use crate::mir::interpret::EvalToValTreeResult; use crate::mir::mono::{MonoItem, NormalizationErrorInMono}; use crate::traits::solve; -use crate::ty::adjustment::CoerceUnsizedInfo; use crate::ty::{self, Ty, TyCtxt}; use crate::{mir, traits}; @@ -160,10 +159,6 @@ impl Erasable for Result>, rustc_errors::ErrorGuaranteed [u8; size_of::>, rustc_errors::ErrorGuaranteed>>()]; } -impl Erasable for Result { - type Storage = [u8; size_of::>()]; -} - impl Erasable for Result>>, rustc_errors::ErrorGuaranteed> { @@ -194,10 +189,6 @@ impl Erasable for Result, mir::interpret::ErrorHandled> { [u8; size_of::, mir::interpret::ErrorHandled>>()]; } -impl Erasable for Result { - type Storage = [u8; size_of::>()]; -} - impl Erasable for Option<(mir::ConstValue, Ty<'_>)> { type Storage = [u8; size_of::)>>()]; } @@ -337,6 +328,8 @@ 
impl_erasable_for_simple_types! { Result<(), rustc_errors::ErrorGuaranteed>, Result<(), rustc_middle::traits::query::NoSolution>, Result, + Result, + Result, rustc_abi::ReprOptions, rustc_ast::expand::allocator::AllocatorKind, rustc_hir::DefaultBodyStability, diff --git a/compiler/rustc_middle/src/query/modifiers.rs b/compiler/rustc_middle/src/query/modifiers.rs index eb9fc330a2316..100d1ac527693 100644 --- a/compiler/rustc_middle/src/query/modifiers.rs +++ b/compiler/rustc_middle/src/query/modifiers.rs @@ -4,43 +4,59 @@ //! modifier names in the query list, and to allow find-all-references to list //! all queries that use a particular modifier. #![allow(unused, non_camel_case_types)] -// FIXME: Update and clarify documentation for these modifiers. // tidy-alphabetical-start // /// # `anon` query modifier /// -/// Generate a dep node based on the dependencies of the query +/// Generate a dep node based not on the query key, but on the query's dependencies. pub(crate) struct anon; /// # `arena_cache` query modifier /// -/// Use this type for the in-memory cache. +/// Query return values must impl `Copy` and be small, but some queries must return values that +/// don't meet those criteria. Queries marked with this modifier have their values allocated in +/// an arena and the query returns a reference to the value. There are two cases. +/// - If the provider function returns `T` then the query will return `&'tcx T`. +/// - If the provider function returns `Option` then the query will return `Option<&'tcx T>`. +/// +/// The query plumbing takes care of the arenas and the type manipulations. pub(crate) struct arena_cache; -/// # `cache_on_disk_if` query modifier +/// # `cache_on_disk_if { ... }` query modifier /// -/// Cache the query to disk if the `Block` returns true. +/// Cache the query result to disk if the provided block evaluates to true. The query key +/// identifier is available for use within the block, as is `tcx`. 
pub(crate) struct cache_on_disk_if; /// # `cycle_delay_bug` query modifier /// -/// A cycle error results in a delay_bug call +/// If a dependency cycle is detected, emit a delayed bug instead of a normal error. pub(crate) struct cycle_delay_bug; /// # `depth_limit` query modifier /// -/// Whether the query has a call depth limit +/// Impose a recursion call depth limit on the query to prevent stack overflow. pub(crate) struct depth_limit; -/// # `desc` query modifier +/// # `desc { ... }` query modifier /// -/// The description of the query. This modifier is required on every query. +/// The human-readable description of the query, for diagnostics and profiling. Required for every +/// query. The block should contain a `format!`-style string literal followed by optional +/// arguments. The query key identifier is available for use within the block, as is `tcx`. pub(crate) struct desc; /// # `eval_always` query modifier /// -/// Always evaluate the query, ignoring its dependencies +/// Queries with this modifier do not track their dependencies, and are treated as always having a +/// red (dirty) dependency instead. This is necessary for queries that interact with state that +/// isn't tracked by the query system. +/// +/// It can also improve performance for queries that are so likely to be dirtied by any change that +/// it's not worth tracking their actual dependencies at all. +/// +/// As with all queries, the return value is still cached in memory for the rest of the compiler +/// session. pub(crate) struct eval_always; /// # `feedable` query modifier @@ -50,12 +66,13 @@ pub(crate) struct feedable; /// # `no_hash` query modifier /// -/// Don't hash the result, instead just mark a query red if it runs +/// Do not hash the query's return value for incremental compilation. If the value needs to be +/// recomputed, always mark its node as red (dirty). 
pub(crate) struct no_hash; /// # `separate_provide_extern` query modifier /// -/// Use a separate query provider for local and extern crates +/// Use separate query provider functions for local and extern crates. pub(crate) struct separate_provide_extern; // tidy-alphabetical-end diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index 33c38adcef058..366a42a431d49 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -322,7 +322,6 @@ macro_rules! define_callbacks { non_queries { $($_:tt)* } ) => { $( - #[allow(unused_lifetimes)] pub mod $name { use super::*; use $crate::query::erase::{self, Erased}; diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index d8dc004f1cf7a..e59ea51ddbe8e 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -240,6 +240,7 @@ fn wait_for_query<'tcx, C: QueryCache>( tcx: TyCtxt<'tcx>, span: Span, key: C::Key, + key_hash: u64, latch: QueryLatch<'tcx>, current: Option, ) -> (C::Value, Option) { @@ -248,8 +249,7 @@ fn wait_for_query<'tcx, C: QueryCache>( // self-profiler. let query_blocked_prof_timer = tcx.prof.query_blocked(); - // With parallel queries we might just have to wait on some other - // thread. + // With parallel queries we might just have to wait on some other thread. let result = latch.wait_on(tcx, current, span); match result { @@ -258,7 +258,6 @@ fn wait_for_query<'tcx, C: QueryCache>( outline(|| { // We didn't find the query result in the query cache. Check if it was // poisoned due to a panic instead. - let key_hash = sharded::make_hash(&key); let shard = query.state.active.lock_shard_by_hash(key_hash); match shard.find(key_hash, equivalent_key(key)) { // The query we waited on panicked. Continue unwinding here. 
@@ -311,16 +310,37 @@ fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>( match state_lock.entry(key_hash, equivalent_key(key), |(k, _)| sharded::make_hash(k)) { Entry::Vacant(entry) => { - // Nothing has computed or is computing the query, so we start a new job and insert it in the - // state map. + // Nothing has computed or is computing the query, so we start a new job and insert it + // in the state map. let id = next_job_id(tcx); let job = QueryJob::new(id, span, current_job_id); entry.insert((key, ActiveKeyStatus::Started(job))); - // Drop the lock before we start executing the query + // Drop the lock before we start executing the query. drop(state_lock); - execute_job::(query, tcx, key, key_hash, id, dep_node) + // Set up a guard object that will automatically poison the query if a + // panic occurs while executing the query (or any intermediate plumbing). + let job_guard = ActiveJobGuard { state: &query.state, key, key_hash }; + + debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR); + + // Delegate to another function to actually execute the query job. + let (value, dep_node_index) = if INCR { + execute_job_incr(query, tcx, key, dep_node, id) + } else { + execute_job_non_incr(query, tcx, key, id) + }; + + if query.feedable { + check_feedable(tcx, query, key, &value); + } + + // Tell the guard to insert `value` in the cache and remove the status entry from + // `query.state`. + job_guard.complete(&query.cache, value, dep_node_index); + + (value, Some(dep_node_index)) } Entry::Occupied(mut entry) => { match &mut entry.get_mut().1 { @@ -332,7 +352,7 @@ fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>( // Only call `wait_for_query` if we're using a Rayon thread pool // as it will attempt to mark the worker thread as blocked. 
- wait_for_query(query, tcx, span, key, latch, current_job_id) + wait_for_query(query, tcx, span, key, key_hash, latch, current_job_id) } else { let id = job.id; drop(state_lock); @@ -349,67 +369,44 @@ fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>( } #[inline(always)] -fn execute_job<'tcx, C: QueryCache, const INCR: bool>( - query: &'tcx QueryVTable<'tcx, C>, +fn check_feedable<'tcx, C: QueryCache>( tcx: TyCtxt<'tcx>, + query: &'tcx QueryVTable<'tcx, C>, key: C::Key, - key_hash: u64, - id: QueryJobId, - dep_node: Option, -) -> (C::Value, Option) { - // Set up a guard object that will automatically poison the query if a - // panic occurs while executing the query (or any intermediate plumbing). - let job_guard = ActiveJobGuard { state: &query.state, key, key_hash }; - - debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR); - - // Delegate to another function to actually execute the query job. - let (value, dep_node_index) = if INCR { - execute_job_incr(query, tcx, key, dep_node, id) - } else { - execute_job_non_incr(query, tcx, key, id) - }; - - let cache = &query.cache; - if query.feedable { - // We should not compute queries that also got a value via feeding. - // This can't happen, as query feeding adds the very dependencies to the fed query - // as its feeding query had. So if the fed query is red, so is its feeder, which will - // get evaluated first, and re-feed the query. - if let Some((cached_value, _)) = cache.lookup(&key) { - let Some(hash_value_fn) = query.hash_value_fn else { - panic!( - "no_hash fed query later has its value computed.\n\ - Remove `no_hash` modifier to allow recomputation.\n\ - The already cached value: {}", - (query.format_value)(&cached_value) - ); - }; + value: &C::Value, +) { + // We should not compute queries that also got a value via feeding. + // This can't happen, as query feeding adds the very dependencies to the fed query + // as its feeding query had. 
So if the fed query is red, so is its feeder, which will + // get evaluated first, and re-feed the query. + if let Some((cached_value, _)) = query.cache.lookup(&key) { + let Some(hash_value_fn) = query.hash_value_fn else { + panic!( + "no_hash fed query later has its value computed.\n\ + Remove `no_hash` modifier to allow recomputation.\n\ + The already cached value: {}", + (query.format_value)(&cached_value) + ); + }; - let (old_hash, new_hash) = tcx.with_stable_hashing_context(|mut hcx| { - (hash_value_fn(&mut hcx, &cached_value), hash_value_fn(&mut hcx, &value)) - }); - let formatter = query.format_value; - if old_hash != new_hash { - // We have an inconsistency. This can happen if one of the two - // results is tainted by errors. - assert!( - tcx.dcx().has_errors().is_some(), - "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\ - computed={:#?}\nfed={:#?}", - query.dep_kind, - key, - formatter(&value), - formatter(&cached_value), - ); - } + let (old_hash, new_hash) = tcx.with_stable_hashing_context(|mut hcx| { + (hash_value_fn(&mut hcx, &cached_value), hash_value_fn(&mut hcx, value)) + }); + let formatter = query.format_value; + if old_hash != new_hash { + // We have an inconsistency. This can happen if one of the two + // results is tainted by errors. + assert!( + tcx.dcx().has_errors().is_some(), + "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\ + computed={:#?}\nfed={:#?}", + query.dep_kind, + key, + formatter(value), + formatter(&cached_value), + ); } } - - // Tell the guard to perform completion bookkeeping, and also to not poison the query. - job_guard.complete(cache, value, dep_node_index); - - (value, Some(dep_node_index)) } // Fast path for when incr. comp. is off. 
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index 74b3aa11d0d80..0bc432a0ff06c 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -2456,6 +2456,8 @@ options! { "the directory metrics emitted by rustc are dumped into (implicitly enables default set of metrics)"), min_function_alignment: Option = (None, parse_align, [TRACKED], "align all functions to at least this many bytes. Must be a power of 2"), + min_recursion_limit: Option = (None, parse_opt_number, [TRACKED], + "set a minimum recursion limit (final limit = max(this, recursion_limit_from_crate))"), mir_emit_retag: bool = (false, parse_bool, [TRACKED], "emit Retagging MIR statements, interpreted e.g., by miri; implies -Zmir-opt-level=0 \ (default: no)"), diff --git a/library/alloc/src/collections/vec_deque/splice.rs b/library/alloc/src/collections/vec_deque/splice.rs index d7b9a96291c39..b82f9fba7ceb3 100644 --- a/library/alloc/src/collections/vec_deque/splice.rs +++ b/library/alloc/src/collections/vec_deque/splice.rs @@ -138,8 +138,48 @@ impl Drain<'_, T, A> { /// self.deque must be valid. unsafe fn move_tail(&mut self, additional: usize) { let deque = unsafe { self.deque.as_mut() }; - let tail_start = deque.len + self.drain_len; - deque.buf.reserve(tail_start + self.tail_len, additional); + + // `Drain::new` modifies the deque's len (so does `Drain::fill` here) + // directly with the start bound of the range passed into + // `VecDeque::splice`. This causes a few different issues: + // - Most notably, there will be a hole at the end of the + // buffer when our buffer resizes in the case that our + // data wraps around. + // - We cannot use `VecDeque::reserve` directly because + // how it reserves more space and updates the `VecDeque`'s + // `head` field accordingly depends on the `VecDeque`'s + // actual `len`. 
+ // - We cannot just directly modify `VecDeque`'s `len` and + // call `VecDeque::reserve` afterward because if + // `VecDeque::reserve` panics on capacity overflow, + // well now our `VecDeque`'s head does not get updated + // and we still have a potential hole at the end of the + // buffer. + // Therefore, we manually reserve additional space (if necessary) + // based on calculating the actual `len` of the `VecDeque` and adjust + // `VecDeque`'s len right *after* the panicking region of `VecDeque::reserve` + // (that is, the `RawVec::reserve()` call) + + let drain_start = deque.len; + let tail_start = drain_start + self.drain_len; + + // Actual VecDeque's len = drain_start + tail_len + drain_len + let actual_len = drain_start + self.tail_len + self.drain_len; + let new_cap = actual_len.checked_add(additional).expect("capacity overflow"); + let old_cap = deque.capacity(); + + if new_cap > old_cap { + deque.buf.reserve(actual_len, additional); + // If `reserve` doesn't panic, we can safely set the `VecDeque` len to its + // actual len; this needs to be done in order to set deque.head correctly + // on `VecDeque::handle_capacity_increase` + deque.len = actual_len; + // SAFETY: this cannot panic since our internal buffer's new_cap should + // be bigger than the passed in old_cap + unsafe { + deque.handle_capacity_increase(old_cap); + } + } let new_tail_start = tail_start + additional; unsafe { @@ -149,6 +189,9 @@ impl Drain<'_, T, A> { self.tail_len, ); } + + // revert the `VecDeque` len to what it was before + deque.len = drain_start; self.drain_len += additional; } } diff --git a/library/alloctests/tests/vec_deque.rs b/library/alloctests/tests/vec_deque.rs index 92853fe00fd63..91843dfd00585 100644 --- a/library/alloctests/tests/vec_deque.rs +++ b/library/alloctests/tests/vec_deque.rs @@ -2347,3 +2347,16 @@ fn test_splice_wrapping() { assert_eq!(Vec::from(vec), [7, 8, 9]); } + +#[test] +fn test_splice_wrapping_and_resize() { + let mut vec = VecDeque::new(); + + for i 
in [1; 6] { + vec.push_front(i); + } + + vec.splice(1..1, [2, 3, 4]); + + assert_eq!(Vec::from(vec), [1, 2, 3, 4, 1, 1, 1, 1, 1]) +} diff --git a/src/doc/rustc-dev-guide/src/query.md b/src/doc/rustc-dev-guide/src/query.md index 1621d79db247a..680393a6d4f32 100644 --- a/src/doc/rustc-dev-guide/src/query.md +++ b/src/doc/rustc-dev-guide/src/query.md @@ -168,12 +168,6 @@ pub fn provide(providers: &mut rustc_middle::util::Providers) { } ``` -Note that `util::Providers` implements `DerefMut` to `query::Providers` so callers of the `provide` functions can pass in a `util::Providers` and it will just work for provider functions that accept `query::Providers` too - -- This function takes a mutable reference to the `query::Providers` struct and sets the fields to point to the correct provider functions. - -- You can also assign queries individually, e.g. `providers.type_of = type_of;`. -- You can assign fields individually for each provider type (local, external, and hooks). - #### Adding a new provider Suppose you want to add a new query called `fubar`. diff --git a/src/doc/unstable-book/src/compiler-flags/min-recursion-limit.md b/src/doc/unstable-book/src/compiler-flags/min-recursion-limit.md new file mode 100644 index 0000000000000..c00cdb95c0320 --- /dev/null +++ b/src/doc/unstable-book/src/compiler-flags/min-recursion-limit.md @@ -0,0 +1,12 @@ +# `min-recursion-limit` + +This flag sets a minimum recursion limit for the compiler. The final recursion limit is calculated as `max(min_recursion_limit, recursion_limit_from_crate)`. This cannot ever lower the recursion limit. Unless the current crate has an explicitly low `recursion_limit` attribute, any value less than the current default does not have an effect. 
+ +The recursion limit affects (among other things): + +- macro expansion +- the trait solver +- const evaluation +- query depth + +This flag is particularly useful when using the next trait solver (`-Z next-solver`), which may require a higher recursion limit for crates that were compiled successfully with the old solver. diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs index 070b1668a3e1b..6718505bdefd9 100644 --- a/src/librustdoc/lib.rs +++ b/src/librustdoc/lib.rs @@ -954,15 +954,15 @@ fn main_args(early_dcx: &mut EarlyDiagCtxt, at_args: &[String]) { }) } -fn dump_feature_usage_metrics(tcxt: TyCtxt<'_>, metrics_dir: &Path) { - let hash = tcxt.crate_hash(LOCAL_CRATE); - let crate_name = tcxt.crate_name(LOCAL_CRATE); +fn dump_feature_usage_metrics(tcx: TyCtxt<'_>, metrics_dir: &Path) { + let hash = tcx.crate_hash(LOCAL_CRATE); + let crate_name = tcx.crate_name(LOCAL_CRATE); let metrics_file_name = format!("unstable_feature_usage_metrics-{crate_name}-{hash}.json"); let metrics_path = metrics_dir.join(metrics_file_name); - if let Err(error) = tcxt.features().dump_feature_usage_metrics(metrics_path) { + if let Err(error) = tcx.features().dump_feature_usage_metrics(metrics_path) { // FIXME(yaahc): once metrics can be enabled by default we will want "failure to emit // default metrics" to only produce a warning when metrics are enabled by default and emit // an error only when the user manually enables metrics - tcxt.dcx().err(format!("cannot emit feature usage metrics: {error}")); + tcx.dcx().err(format!("cannot emit feature usage metrics: {error}")); } } diff --git a/tests/ui/recursion/recursion_limit/min-recursion-limit-attr-lower-than-default.rs b/tests/ui/recursion/recursion_limit/min-recursion-limit-attr-lower-than-default.rs new file mode 100644 index 0000000000000..d9de45b5735e4 --- /dev/null +++ b/tests/ui/recursion/recursion_limit/min-recursion-limit-attr-lower-than-default.rs @@ -0,0 +1,20 @@ +//@ compile-flags: -Z min-recursion-limit=0 +//@ 
check-pass + +// Checks that `min-recursion-limit` cannot lower the default recursion limit + +macro_rules! count { + () => {}; + ($_:tt $($rest:tt)*) => { count!($($rest)*) }; +} + +fn main() { + // 100 + count!( + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + ); +} diff --git a/tests/ui/recursion/recursion_limit/min-recursion-limit-cli-wins.rs b/tests/ui/recursion/recursion_limit/min-recursion-limit-cli-wins.rs new file mode 100644 index 0000000000000..44c9f76f3785b --- /dev/null +++ b/tests/ui/recursion/recursion_limit/min-recursion-limit-cli-wins.rs @@ -0,0 +1,24 @@ +//@ compile-flags: -Z min-recursion-limit=256 +//@ check-pass +#![recursion_limit = "128"] + +macro_rules! count { + () => {}; + ($_:tt $($rest:tt)*) => { count!($($rest)*) }; +} + +fn main() { + // 200 + count!( + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + ); +} diff --git a/tests/ui/recursion/recursion_limit/min-recursion-limit-attr-wins.rs b/tests/ui/recursion/recursion_limit/min-recursion-limit-attr-wins.rs new file mode 100644 index 0000000000000..10dc9cfe06b2f --- /dev/null +++ b/tests/ui/recursion/recursion_limit/min-recursion-limit-attr-wins.rs @@ -0,0 +1,24 @@ +//@ compile-flags: -Z min-recursion-limit=64 +//@ check-pass +#![recursion_limit = "256"] + +macro_rules! 
count { + () => {}; + ($_:tt $($rest:tt)*) => { count!($($rest)*) }; +} + +fn main() { + // 200 + count!( + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + ); +} diff --git a/tests/ui/recursion/recursion_limit/min-recursion-limit-no-attr.rs b/tests/ui/recursion/recursion_limit/min-recursion-limit-no-attr.rs new file mode 100644 index 0000000000000..f918ec86c1411 --- /dev/null +++ b/tests/ui/recursion/recursion_limit/min-recursion-limit-no-attr.rs @@ -0,0 +1,23 @@ +//@ compile-flags: -Z min-recursion-limit=256 +//@ check-pass + +macro_rules! count { + () => {}; + ($_:tt $($rest:tt)*) => { count!($($rest)*) }; +} + +fn main() { + // 200 + count!( + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + a a a a a a a a a a a a a a a a a a a a + ); +}