Rollup of 7 pull requests #128148

Closed · wants to merge 16 commits
1 change: 0 additions & 1 deletion Cargo.lock
@@ -3882,7 +3882,6 @@ dependencies = [
"termcolor",
"termize",
"tracing",
"unicode-width",
"windows",
]

1 change: 0 additions & 1 deletion compiler/rustc_errors/Cargo.toml
@@ -26,7 +26,6 @@ serde_json = "1.0.59"
termcolor = "1.2.0"
termize = "0.1.1"
tracing = "0.1"
unicode-width = "0.1.4"
# tidy-alphabetical-end

[target.'cfg(windows)'.dependencies.windows]
77 changes: 51 additions & 26 deletions compiler/rustc_errors/src/emitter.rs
@@ -8,7 +8,7 @@
//! The output types are defined in `rustc_session::config::ErrorOutputType`.

use rustc_span::source_map::SourceMap;
use rustc_span::{FileLines, FileName, SourceFile, Span};
use rustc_span::{char_width, FileLines, FileName, SourceFile, Span};

use crate::snippet::{
Annotation, AnnotationColumn, AnnotationType, Line, MultilineAnnotation, Style, StyledString,
@@ -677,10 +677,7 @@ impl HumanEmitter {
.skip(left)
.take_while(|ch| {
// Make sure that the trimming on the right will fall within the terminal width.
// FIXME: `unicode_width` sometimes disagrees with terminals on how wide a `char`
// is. For now, just accept that sometimes the code line will be longer than
// desired.
let next = unicode_width::UnicodeWidthChar::width(*ch).unwrap_or(1);
let next = char_width(*ch);
if taken + next > right - left {
return false;
}
@@ -742,11 +739,7 @@ impl HumanEmitter {
let left = margin.left(source_string.len());

// Account for unicode characters of width !=0 that were removed.
let left = source_string
.chars()
.take(left)
.map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
.sum();
let left = source_string.chars().take(left).map(|ch| char_width(ch)).sum();

self.draw_line(
buffer,
@@ -2039,7 +2032,7 @@ impl HumanEmitter {
let sub_len: usize =
if is_whitespace_addition { &part.snippet } else { part.snippet.trim() }
.chars()
.map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
.map(|ch| char_width(ch))
.sum();

let offset: isize = offsets
@@ -2076,11 +2069,8 @@ impl HumanEmitter {
}

// length of the code after substitution
let full_sub_len = part
.snippet
.chars()
.map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
.sum::<usize>() as isize;
let full_sub_len =
part.snippet.chars().map(|ch| char_width(ch)).sum::<usize>() as isize;

// length of the code to be substituted
let snippet_len = span_end_pos as isize - span_start_pos as isize;
@@ -2568,18 +2558,53 @@ fn num_decimal_digits(num: usize) -> usize {
}

// We replace some characters so the CLI output is always consistent and underlines aligned.
// Keep the following list in sync with `rustc_span::char_width`.
const OUTPUT_REPLACEMENTS: &[(char, &str)] = &[
('\t', " "), // We do our own tab replacement
('\t', " "), // We do our own tab replacement
('\u{200D}', ""), // Replace ZWJ with nothing for consistent terminal output of grapheme clusters.
('\u{202A}', ""), // The following unicode text flow control characters are inconsistently
('\u{202B}', ""), // supported across CLIs and can cause confusion due to the bytes on disk
('\u{202D}', ""), // not corresponding to the visible source code, so we replace them always.
('\u{202E}', ""),
('\u{2066}', ""),
('\u{2067}', ""),
('\u{2068}', ""),
('\u{202C}', ""),
('\u{2069}', ""),
('\u{202A}', "�"), // The following unicode text flow control characters are inconsistently
('\u{202B}', "�"), // supported across CLIs and can cause confusion due to the bytes on disk
('\u{202D}', "�"), // not corresponding to the visible source code, so we replace them always.
('\u{202E}', "�"),
('\u{2066}', "�"),
('\u{2067}', "�"),
('\u{2068}', "�"),
('\u{202C}', "�"),
('\u{2069}', "�"),
// In terminals without Unicode support the following will be garbled, but in *all* terminals
// the underlying codepoint will be as well. We could gate this replacement behind a "unicode
// support" gate.
('\u{0000}', "␀"),
('\u{0001}', "␁"),
('\u{0002}', "␂"),
('\u{0003}', "␃"),
('\u{0004}', "␄"),
('\u{0005}', "␅"),
('\u{0006}', "␆"),
('\u{0007}', "␇"),
('\u{0008}', "␈"),
('\u{000B}', "␋"),
('\u{000C}', "␌"),
('\u{000D}', "␍"),
('\u{000E}', "␎"),
('\u{000F}', "␏"),
('\u{0010}', "␐"),
('\u{0011}', "␑"),
('\u{0012}', "␒"),
('\u{0013}', "␓"),
('\u{0014}', "␔"),
('\u{0015}', "␕"),
('\u{0016}', "␖"),
('\u{0017}', "␗"),
('\u{0018}', "␘"),
('\u{0019}', "␙"),
('\u{001A}', "␚"),
('\u{001B}', "␛"),
('\u{001C}', "␜"),
('\u{001D}', "␝"),
('\u{001E}', "␞"),
('\u{001F}', "␟"),
('\u{007F}', "␡"),
];

fn normalize_whitespace(str: &str) -> String {
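To make the intent of the emitter changes above concrete, here is a minimal standalone sketch of the two ideas: a per-char display-width estimate and table-driven character replacement. It is not rustc's implementation; `char_width` below is only a stand-in for `rustc_span::char_width`, and the widths and the tiny replacement table are illustrative assumptions.

```rust
// Minimal standalone sketch (not rustc's implementation). The widths and the
// replacement table below are illustrative assumptions only.

/// Rough display width of a char for terminal layout purposes.
fn char_width(ch: char) -> usize {
    match ch {
        '\t' => 4,                                 // tabs are rendered as four spaces
        '\u{200D}' => 0,                           // ZWJ is stripped, so it occupies no column
        '\u{0000}'..='\u{001F}' | '\u{007F}' => 1, // drawn as a one-column control picture, e.g. ␀
        _ => 1,                                    // assume narrow; wide chars are ignored here
    }
}

/// Replace characters that terminals render inconsistently, mirroring the idea
/// of the `OUTPUT_REPLACEMENTS` table above with a much smaller table.
fn normalize_whitespace(s: &str) -> String {
    const REPLACEMENTS: &[(char, &str)] = &[('\t', "    "), ('\u{0000}', "␀"), ('\u{202E}', "�")];
    let mut out = String::with_capacity(s.len());
    for ch in s.chars() {
        match REPLACEMENTS.iter().find(|(from, _)| *from == ch) {
            Some((_, to)) => out.push_str(to),
            None => out.push(ch),
        }
    }
    out
}

fn main() {
    assert_eq!(normalize_whitespace("a\tb\u{0000}"), "a    b␀");
    assert_eq!(char_width('\u{200D}'), 0);
}
```

Keeping the width function and the replacement table in sync, as the new comment above asks, is what keeps underlines aligned with the normalized source line.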
24 changes: 15 additions & 9 deletions compiler/rustc_hir/src/hir.rs
@@ -763,7 +763,7 @@ impl<'hir> Generics<'hir> {
)
}

fn span_for_predicate_removal(&self, pos: usize) -> Span {
pub fn span_for_predicate_removal(&self, pos: usize) -> Span {
let predicate = &self.predicates[pos];
let span = predicate.span();

@@ -806,15 +806,21 @@ impl<'hir> Generics<'hir> {
return self.span_for_predicate_removal(predicate_pos);
}

let span = bounds[bound_pos].span();
if bound_pos == 0 {
// where T: ?Sized + Bar, Foo: Bar,
// ^^^^^^^^^
span.to(bounds[1].span().shrink_to_lo())
let bound_span = bounds[bound_pos].span();
if bound_pos < bounds.len() - 1 {
// If there's another bound after the current bound
// include the following '+' e.g.:
//
// `T: Foo + CurrentBound + Bar`
// ^^^^^^^^^^^^^^^
bound_span.to(bounds[bound_pos + 1].span().shrink_to_lo())
} else {
// where T: Bar + ?Sized, Foo: Bar,
// ^^^^^^^^^
bounds[bound_pos - 1].span().shrink_to_hi().to(span)
// If the current bound is the last bound
// include the preceding '+' E.g.:
//
// `T: Foo + Bar + CurrentBound`
// ^^^^^^^^^^^^^^^
bound_span.with_lo(bounds[bound_pos - 1].span().hi())
}
}
}
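The rewritten `span_for_bound_removal` above chooses which `+` to delete along with the bound. Here is a standalone sketch of that rule over plain byte ranges rather than rustc spans; the helper name and the example ranges are assumptions for illustration, and the single-bound case is handled by predicate removal in the real code.

```rust
// Sketch of the rule described by the new comments: removing a non-final bound
// also removes the following `+`, while removing the final bound removes the
// preceding `+`. Uses byte ranges on a plain string, not rustc spans.
use std::ops::Range;

fn removal_range(bounds: &[Range<usize>], pos: usize) -> Range<usize> {
    if pos < bounds.len() - 1 {
        // `T: Foo + CurrentBound + Bar`
        //           ^^^^^^^^^^^^^^^
        bounds[pos].start..bounds[pos + 1].start
    } else {
        // `T: Foo + Bar + CurrentBound`
        //              ^^^^^^^^^^^^^^^
        bounds[pos - 1].end..bounds[pos].end
    }
}

fn main() {
    let mut src = String::from("T: Foo + Bar + Baz");
    let bounds = [3..6, 9..12, 15..18]; // byte ranges of `Foo`, `Bar`, `Baz`
    src.replace_range(removal_range(&bounds, 2), ""); // remove the last bound, `Baz`
    assert_eq!(src, "T: Foo + Bar");
}
```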
8 changes: 4 additions & 4 deletions compiler/rustc_hir_typeck/src/method/probe.rs
@@ -1846,7 +1846,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
/// Determine if the associated item with the given DefId matches
/// the desired name via a doc alias.
fn matches_by_doc_alias(&self, def_id: DefId) -> bool {
let Some(name) = self.method_name else {
let Some(method) = self.method_name else {
return false;
};
let Some(local_def_id) = def_id.as_local() else {
@@ -1863,7 +1863,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
// #[rustc_confusables("foo", "bar"))]
for n in confusables {
if let Some(lit) = n.lit()
&& name.as_str() == lit.symbol.as_str()
&& method.name == lit.symbol
{
return true;
}
@@ -1883,14 +1883,14 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
// #[doc(alias("foo", "bar"))]
for n in nested {
if let Some(lit) = n.lit()
&& name.as_str() == lit.symbol.as_str()
&& method.name == lit.symbol
{
return true;
}
}
} else if let Some(meta) = v.meta_item()
&& let Some(lit) = meta.name_value_literal()
&& name.as_str() == lit.symbol.as_str()
&& method.name == lit.symbol
{
// #[doc(alias = "foo")]
return true;
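The change above compares the interned `method.name` with `lit.symbol` directly instead of comparing `as_str()` results. The toy interner below, which is not rustc's `Symbol` machinery, sketches why equal interned names reduce to a plain integer comparison.

```rust
// Toy string interner, an illustration only: equal strings intern to the same
// id, so name equality becomes an integer compare instead of a byte-by-byte
// string compare.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Symbol(u32);

#[derive(Default)]
struct Interner {
    ids: HashMap<String, u32>,
    strings: Vec<String>,
}

impl Interner {
    fn intern(&mut self, s: &str) -> Symbol {
        if let Some(&id) = self.ids.get(s) {
            return Symbol(id);
        }
        let id = self.strings.len() as u32;
        self.ids.insert(s.to_string(), id);
        self.strings.push(s.to_string());
        Symbol(id)
    }
}

fn main() {
    let mut interner = Interner::default();
    let method = interner.intern("foo");
    let alias = interner.intern("foo");
    assert_eq!(method, alias); // cheap id comparison, no string walk
}
```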
2 changes: 1 addition & 1 deletion compiler/rustc_lint_defs/src/builtin.rs
@@ -1424,7 +1424,7 @@ declare_lint! {
Deny,
"detects missing fragment specifiers in unused `macro_rules!` patterns",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #40107 <https://github.com/rust-lang/rust/issues/40107>",
};
}
2 changes: 0 additions & 2 deletions compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -1728,7 +1728,6 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
source_len,
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
stable_id,
..
@@ -1780,7 +1779,6 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
self.cnum,
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
source_file_index,
);
69 changes: 49 additions & 20 deletions compiler/rustc_middle/src/ty/diagnostics.rs
@@ -188,31 +188,60 @@ fn suggest_changing_unsized_bound(
continue;
};

for (pos, bound) in predicate.bounds.iter().enumerate() {
let hir::GenericBound::Trait(poly, hir::TraitBoundModifier::Maybe) = bound else {
continue;
};
if poly.trait_ref.trait_def_id() != def_id {
continue;
}
if predicate.origin == PredicateOrigin::ImplTrait && predicate.bounds.len() == 1 {
// For `impl ?Sized` with no other bounds, suggest `impl Sized` instead.
let bound_span = bound.span();
if bound_span.can_be_used_for_suggestions() {
let question_span = bound_span.with_hi(bound_span.lo() + BytePos(1));
suggestions.push((
let unsized_bounds = predicate
.bounds
.iter()
.enumerate()
.filter(|(_, bound)| {
if let hir::GenericBound::Trait(poly, hir::TraitBoundModifier::Maybe) = bound
&& poly.trait_ref.trait_def_id() == def_id
{
true
} else {
false
}
})
.collect::<Vec<_>>();

if unsized_bounds.is_empty() {
continue;
}

let mut push_suggestion = |sp, msg| suggestions.push((sp, String::new(), msg));

if predicate.bounds.len() == unsized_bounds.len() {
// All the bounds are unsized bounds, e.g.
// `T: ?Sized + ?Sized` or `_: impl ?Sized + ?Sized`,
// so in this case:
// - if it's an impl trait predicate, suggest changing
// the first bound to sized and removing the rest
// - Otherwise simply suggest removing the entire predicate
if predicate.origin == PredicateOrigin::ImplTrait {
let first_bound = unsized_bounds[0].1;
let first_bound_span = first_bound.span();
if first_bound_span.can_be_used_for_suggestions() {
let question_span =
first_bound_span.with_hi(first_bound_span.lo() + BytePos(1));
push_suggestion(
question_span,
String::new(),
SuggestChangingConstraintsMessage::ReplaceMaybeUnsizedWithSized,
));
);

for (pos, _) in unsized_bounds.iter().skip(1) {
let sp = generics.span_for_bound_removal(where_pos, *pos);
push_suggestion(sp, SuggestChangingConstraintsMessage::RemoveMaybeUnsized);
}
}
} else {
let sp = generics.span_for_predicate_removal(where_pos);
push_suggestion(sp, SuggestChangingConstraintsMessage::RemoveMaybeUnsized);
}
} else {
// Some of the bounds are other than unsized,
// so push a separate removal suggestion for each unsized bound
for (pos, _) in unsized_bounds {
let sp = generics.span_for_bound_removal(where_pos, pos);
suggestions.push((
sp,
String::new(),
SuggestChangingConstraintsMessage::RemoveMaybeUnsized,
));
push_suggestion(sp, SuggestChangingConstraintsMessage::RemoveMaybeUnsized);
}
}
}
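The branch structure above can be summarized as follows: if all bounds are unsized and the predicate comes from `impl Trait`, replace the first `?Sized` with `Sized` and remove the rest; if all bounds are unsized otherwise, remove the whole predicate; with mixed bounds, remove each `?Sized` bound separately. Below is a standalone sketch of that policy over a toy model; the enum only loosely mirrors `SuggestChangingConstraintsMessage` and is an assumption for illustration.

```rust
// Toy model of the `?Sized` suggestion policy, not the rustc data structures.
#[derive(Debug, PartialEq)]
enum Suggestion {
    ReplaceMaybeUnsizedWithSized(usize), // bound index whose leading `?` is dropped
    RemoveBound(usize),                  // bound index removed together with its `+`
    RemovePredicate,                     // remove the whole predicate
}

fn suggest(bounds: &[&str], is_impl_trait: bool) -> Vec<Suggestion> {
    let unsized_positions: Vec<usize> = bounds
        .iter()
        .enumerate()
        .filter(|(_, b)| b.starts_with("?Sized"))
        .map(|(i, _)| i)
        .collect();

    if unsized_positions.is_empty() {
        return Vec::new();
    }

    if unsized_positions.len() == bounds.len() {
        if is_impl_trait {
            // e.g. `impl ?Sized + ?Sized`: turn the first bound into `Sized`, drop the rest.
            let mut out = vec![Suggestion::ReplaceMaybeUnsizedWithSized(unsized_positions[0])];
            out.extend(unsized_positions[1..].iter().map(|&i| Suggestion::RemoveBound(i)));
            out
        } else {
            // e.g. `T: ?Sized + ?Sized`: remove the predicate entirely.
            vec![Suggestion::RemovePredicate]
        }
    } else {
        // Mixed bounds: suggest removing each `?Sized` bound on its own.
        unsized_positions.into_iter().map(Suggestion::RemoveBound).collect()
    }
}

fn main() {
    assert_eq!(
        suggest(&["?Sized", "?Sized"], true),
        vec![Suggestion::ReplaceMaybeUnsizedWithSized(0), Suggestion::RemoveBound(1)]
    );
    assert_eq!(suggest(&["?Sized", "Bar"], false), vec![Suggestion::RemoveBound(0)]);
}
```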
6 changes: 0 additions & 6 deletions compiler/rustc_query_system/src/ich/impls_syntax.rs
@@ -73,7 +73,6 @@ impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
source_len: _,
lines: _,
ref multibyte_chars,
ref non_narrow_chars,
ref normalized_pos,
} = *self;

@@ -98,11 +97,6 @@ impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
char_pos.hash_stable(hcx, hasher);
}

non_narrow_chars.len().hash_stable(hcx, hasher);
for &char_pos in non_narrow_chars.iter() {
char_pos.hash_stable(hcx, hasher);
}

normalized_pos.len().hash_stable(hcx, hasher);
for &char_pos in normalized_pos.iter() {
char_pos.hash_stable(hcx, hasher);
Expand Down