Fixes for Rust 1.89

Closes #4287
Damien Elmes 2025-09-01 14:30:40 +10:00
parent 6dd9daf074
commit 71ec878780
28 changed files with 129 additions and 107 deletions
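Most of the churn below comes from the `mismatched_lifetime_syntaxes` lint, which Rust 1.89 enables by default: when a function borrows from an argument but the return type hides that lifetime, the compiler now asks for an explicit `'_`. A minimal sketch of the before/after pattern, based on the `with_exe` helper in this diff (the `else` branch is assumed, as the hunk is truncated):

```rust
use std::borrow::Cow;

// Warned about by Rust 1.89: the returned Cow borrows from `path`,
// but the signature does not show it.
//     pub fn with_exe(path: &str) -> Cow<str> { ... }

// Accepted: `'_` makes the elided lifetime visible.
pub fn with_exe(path: &str) -> Cow<'_, str> {
    if cfg!(windows) {
        format!("{path}.exe").into()
    } else {
        path.into() // assumed body; the diff truncates this branch
    }
}
```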

Cargo.lock (generated)

@@ -183,6 +183,7 @@ dependencies = [
  "itertools 0.14.0",
  "num-format",
  "phf 0.11.3",
+ "regex",
  "serde",
  "serde_json",
  "unic-langid",


@@ -67,7 +67,7 @@ impl Platform {
 }
 
 /// Append .exe to path if on Windows.
-pub fn with_exe(path: &str) -> Cow<str> {
+pub fn with_exe(path: &str) -> Cow<'_, str> {
     if cfg!(windows) {
         format!("{path}.exe").into()
     } else {


@@ -98,7 +98,7 @@ impl BuildAction for YarnInstall<'_> {
     }
 }
 
-fn with_cmd_ext(bin: &str) -> Cow<str> {
+fn with_cmd_ext(bin: &str) -> Cow<'_, str> {
     if cfg!(windows) {
         format!("{bin}.cmd").into()
     } else {


@@ -22,6 +22,7 @@ inflections.workspace = true
 anki_io.workspace = true
 anyhow.workspace = true
 itertools.workspace = true
+regex.workspace = true
 
 [dependencies]
 fluent.workspace = true


@@ -4,6 +4,5 @@
 
 // Include auto-generated content
 #![allow(clippy::all)]
-#![allow(text_direction_codepoint_in_literal)]
 include!(concat!(env!("OUT_DIR"), "/strings.rs"));


@@ -195,12 +195,30 @@ pub(crate) const {lang_name}: phf::Map<&str, &str> = phf::phf_map! {{",
     .unwrap();
     for (module, contents) in modules {
-        writeln!(buf, r###" "{module}" => r##"{contents}"##,"###).unwrap();
+        let escaped_contents = escape_unicode_control_chars(contents);
+        writeln!(
+            buf,
+            r###" "{module}" => r##"{escaped_contents}"##,"###
+        )
+        .unwrap();
     }
     buf.push_str("};\n");
 }
 
+fn escape_unicode_control_chars(input: &str) -> String {
+    use regex::Regex;
+    static RE: std::sync::OnceLock<Regex> = std::sync::OnceLock::new();
+    let re = RE.get_or_init(|| Regex::new(r"[\u{202a}-\u{202e}\u{2066}-\u{2069}]").unwrap());
+    re.replace_all(input, |caps: &regex::Captures| {
+        let c = caps.get(0).unwrap().as_str().chars().next().unwrap();
+        format!("\\u{{{:04x}}}", c as u32)
+    })
+    .into_owned()
+}
+
 fn lang_constant_name(lang: &str) -> String {
     lang.to_ascii_uppercase().replace('-', "_")
 }
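The escaping exists because some translation strings embed Unicode bidirectional control characters (U+202A–U+202E, U+2066–U+2069), which the `text_direction_codepoint_in_literal` lint flags when they appear raw in the generated source; writing them as `\u{...}` escapes lets the `#![allow]` above be dropped. A quick illustrative check, assuming the helper above is in scope (U+202E is RIGHT-TO-LEFT OVERRIDE):

```rust
#[test]
fn escapes_bidi_controls() {
    // The raw control character becomes a visible escape sequence...
    assert_eq!(
        escape_unicode_control_chars("abc\u{202e}def"),
        r"abc\u{202e}def"
    );
    // ...and text without bidi controls passes through unchanged.
    assert_eq!(escape_unicode_control_chars("plain"), "plain");
}
```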


@@ -42,14 +42,14 @@ enum CheckableUrl {
 }
 
 impl CheckableUrl {
-    fn url(&self) -> Cow<str> {
+    fn url(&self) -> Cow<'_, str> {
         match *self {
             Self::HelpPage(page) => help_page_to_link(page).into(),
             Self::String(s) => s.into(),
         }
     }
 
-    fn anchor(&self) -> Cow<str> {
+    fn anchor(&self) -> Cow<'_, str> {
         match *self {
             Self::HelpPage(page) => help_page_link_suffix(page).into(),
             Self::String(s) => s.split('#').next_back().unwrap_or_default().into(),


@@ -94,7 +94,7 @@ impl BackendCollectionService for Backend {
 }
 
 impl Backend {
-    pub(super) fn lock_open_collection(&self) -> Result<MutexGuard<Option<Collection>>> {
+    pub(super) fn lock_open_collection(&self) -> Result<MutexGuard<'_, Option<Collection>>> {
         let guard = self.col.lock().unwrap();
         guard
             .is_some()
@@ -102,7 +102,7 @@ impl Backend {
             .ok_or(AnkiError::CollectionNotOpen)
     }
 
-    pub(super) fn lock_closed_collection(&self) -> Result<MutexGuard<Option<Collection>>> {
+    pub(super) fn lock_closed_collection(&self) -> Result<MutexGuard<'_, Option<Collection>>> {
         let guard = self.col.lock().unwrap();
         guard
             .is_none()

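The `MutexGuard` changes are the same lint applied to guard types: the guard keeps `self.col` locked, so it borrows from `&self`, and that borrow must now be spelled `'_`. A self-contained sketch with simplified types (`String` standing in for the real `Collection`):

```rust
use std::sync::{Mutex, MutexGuard};

struct Backend {
    col: Mutex<Option<String>>, // stand-in for Mutex<Option<Collection>>
}

impl Backend {
    // The returned guard borrows from &self; Rust 1.89 wants the
    // elided lifetime written out in the return type.
    fn lock_collection(&self) -> MutexGuard<'_, Option<String>> {
        self.col.lock().unwrap()
    }
}
```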

@@ -34,7 +34,7 @@ pub fn prettify_av_tags<S: Into<String> + AsRef<str>>(txt: S) -> String {
 
 /// Parse `txt` into [CardNodes] and return the result,
 /// or [None] if it only contains text nodes.
-fn nodes_or_text_only(txt: &str) -> Option<CardNodes> {
+fn nodes_or_text_only(txt: &str) -> Option<CardNodes<'_>> {
     let nodes = CardNodes::parse(txt);
     (!nodes.text_only).then_some(nodes)
 }


@@ -103,13 +103,13 @@ fn is_not0<'parser, 'arr: 'parser, 's: 'parser>(
     move |s| alt((is_not(arr), success(""))).parse(s)
 }
 
-fn node(s: &str) -> IResult<Node> {
+fn node(s: &str) -> IResult<'_, Node<'_>> {
     alt((sound_node, tag_node, text_node)).parse(s)
 }
 
 /// A sound tag `[sound:resource]`, where `resource` is pointing to a sound or
 /// video file.
-fn sound_node(s: &str) -> IResult<Node> {
+fn sound_node(s: &str) -> IResult<'_, Node<'_>> {
     map(
         delimited(tag("[sound:"), is_not("]"), tag("]")),
         Node::SoundOrVideo,
@@ -117,7 +117,7 @@ fn sound_node(s: &str) -> IResult<Node> {
     .parse(s)
 }
 
-fn take_till_potential_tag_start(s: &str) -> IResult<&str> {
+fn take_till_potential_tag_start(s: &str) -> IResult<'_, &str> {
     // first char could be '[', but wasn't part of a node, so skip (eof ends parse)
     let (after, offset) = anychar(s).map(|(s, c)| (s, c.len_utf8()))?;
     Ok(match after.find('[') {
@@ -127,9 +127,9 @@ fn take_till_potential_tag_start(s: &str) -> IResult<&str> {
 }
 
 /// An Anki tag `[anki:tag...]...[/anki:tag]`.
-fn tag_node(s: &str) -> IResult<Node> {
+fn tag_node(s: &str) -> IResult<'_, Node<'_>> {
     /// Match the start of an opening tag and return its name.
-    fn name(s: &str) -> IResult<&str> {
+    fn name(s: &str) -> IResult<'_, &str> {
         preceded(tag("[anki:"), is_not("] \t\r\n")).parse(s)
     }
 
@@ -139,12 +139,12 @@ fn tag_node(s: &str) -> IResult<Node> {
     ) -> impl FnMut(&'s str) -> IResult<'s, Vec<(&'s str, &'s str)>> + 'name {
         /// List of whitespace-separated `key=val` tuples, where `val` may be
         /// empty.
-        fn options(s: &str) -> IResult<Vec<(&str, &str)>> {
-            fn key(s: &str) -> IResult<&str> {
+        fn options(s: &str) -> IResult<'_, Vec<(&str, &str)>> {
+            fn key(s: &str) -> IResult<'_, &str> {
                 is_not("] \t\r\n=").parse(s)
             }
 
-            fn val(s: &str) -> IResult<&str> {
+            fn val(s: &str) -> IResult<'_, &str> {
                 alt((
                     delimited(tag("\""), is_not0("\""), tag("\"")),
                     is_not0("] \t\r\n\""),
@@ -197,7 +197,7 @@ fn tag_node(s: &str) -> IResult<Node> {
     .parse(s)
 }
 
-fn text_node(s: &str) -> IResult<Node> {
+fn text_node(s: &str) -> IResult<'_, Node<'_>> {
     map(take_till_potential_tag_start, Node::Text).parse(s)
 }


@@ -54,8 +54,8 @@ enum Token<'a> {
 }
 
 /// Tokenize string
-fn tokenize(mut text: &str) -> impl Iterator<Item = Token> {
-    fn open_cloze(text: &str) -> IResult<&str, Token> {
+fn tokenize(mut text: &str) -> impl Iterator<Item = Token<'_>> {
+    fn open_cloze(text: &str) -> IResult<&str, Token<'_>> {
         // opening brackets and 'c'
         let (text, _opening_brackets_and_c) = tag("{{c")(text)?;
         // following number
@@ -75,12 +75,12 @@ fn tokenize(mut text: &str) -> impl Iterator<Item = Token> {
         Ok((text, Token::OpenCloze(digits)))
     }
 
-    fn close_cloze(text: &str) -> IResult<&str, Token> {
+    fn close_cloze(text: &str) -> IResult<&str, Token<'_>> {
        map(tag("}}"), |_| Token::CloseCloze).parse(text)
     }
 
     /// Match a run of text until an open/close marker is encountered.
-    fn normal_text(text: &str) -> IResult<&str, Token> {
+    fn normal_text(text: &str) -> IResult<&str, Token<'_>> {
         if text.is_empty() {
             return Err(nom::Err::Error(nom::error::make_error(
                 text,
@@ -132,7 +132,7 @@ impl ExtractedCloze<'_> {
         self.hint.unwrap_or("...")
     }
 
-    fn clozed_text(&self) -> Cow<str> {
+    fn clozed_text(&self) -> Cow<'_, str> {
         // happy efficient path?
         if self.nodes.len() == 1 {
             if let TextOrCloze::Text(text) = self.nodes.last().unwrap() {
@@ -353,7 +353,7 @@ pub fn parse_image_occlusions(text: &str) -> Vec<ImageOcclusion> {
         .collect()
 }
 
-pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<str> {
+pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<'_, str> {
     let mut buf = String::new();
     let mut active_cloze_found_in_text = false;
     for node in &parse_text_with_clozes(text) {
@@ -376,7 +376,7 @@ pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<str>
     }
 }
 
-pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow<str> {
+pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow<'_, str> {
     let mut output = Vec::new();
     for node in &parse_text_with_clozes(text) {
         reveal_cloze_text_in_nodes(node, cloze_ord, question, &mut output);
@@ -384,7 +384,7 @@ pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow
     output.join(", ").into()
 }
 
-pub fn extract_cloze_for_typing(text: &str, cloze_ord: u16) -> Cow<str> {
+pub fn extract_cloze_for_typing(text: &str, cloze_ord: u16) -> Cow<'_, str> {
     let mut output = Vec::new();
     for node in &parse_text_with_clozes(text) {
         reveal_cloze_text_in_nodes(node, cloze_ord, false, &mut output);
@@ -460,7 +460,7 @@ pub(crate) fn strip_clozes(text: &str) -> Cow<'_, str> {
     CLOZE.replace_all(text, "$1")
 }
 
-fn strip_html_inside_mathjax(text: &str) -> Cow<str> {
+fn strip_html_inside_mathjax(text: &str) -> Cow<'_, str> {
     MATHJAX.replace_all(text, |caps: &Captures| -> String {
         format!(
             "{}{}{}",


@@ -191,7 +191,7 @@ fn invalid_char_for_deck_component(c: char) -> bool {
     c.is_ascii_control()
 }
 
-fn normalized_deck_name_component(comp: &str) -> Cow<str> {
+fn normalized_deck_name_component(comp: &str) -> Cow<'_, str> {
     let mut out = normalize_to_nfc(comp);
     if out.contains(invalid_char_for_deck_component) {
         out = out.replace(invalid_char_for_deck_component, "").into();


@@ -231,7 +231,10 @@ fn svg_getter(notetypes: &[Notetype]) -> impl Fn(NotetypeId) -> bool {
 }
 
 impl Collection {
-    fn gather_notes(&mut self, search: impl TryIntoSearch) -> Result<(Vec<Note>, NoteTableGuard)> {
+    fn gather_notes(
+        &mut self,
+        search: impl TryIntoSearch,
+    ) -> Result<(Vec<Note>, NoteTableGuard<'_>)> {
         let guard = self.search_notes_into_table(search)?;
         guard
             .col
@@ -240,7 +243,7 @@ impl Collection {
             .map(|notes| (notes, guard))
     }
 
-    fn gather_cards(&mut self) -> Result<(Vec<Card>, CardTableGuard)> {
+    fn gather_cards(&mut self) -> Result<(Vec<Card>, CardTableGuard<'_>)> {
         let guard = self.search_cards_of_notes_into_table()?;
         guard
             .col


@@ -664,7 +664,7 @@ mod test {
         self
     }
 
-    fn import(self, col: &mut Collection) -> NoteContext {
+    fn import(self, col: &mut Collection) -> NoteContext<'_> {
         let mut progress_handler = col.new_progress_handler();
         let media_map = Box::leak(Box::new(self.media_map));
         let mut ctx = NoteContext::new(


@@ -154,7 +154,7 @@ pub(super) fn extract_media_entries(
     }
 }
 
-pub(super) fn safe_normalized_file_name(name: &str) -> Result<Cow<str>> {
+pub(super) fn safe_normalized_file_name(name: &str) -> Result<Cow<'_, str>> {
     if !filename_is_safe(name) {
         Err(AnkiError::ImportError {
             source: ImportError::Corrupt,


@@ -147,7 +147,7 @@ fn rendered_nodes_to_str(nodes: &[RenderedNode]) -> String {
         .join("")
 }
 
-fn field_to_record_field(field: &str, with_html: bool) -> Cow<str> {
+fn field_to_record_field(field: &str, with_html: bool) -> Cow<'_, str> {
     let mut text = strip_redundant_sections(field);
     if !with_html {
         text = text.map_cow(|t| html_to_text_line(t, false));
@@ -155,7 +155,7 @@ fn field_to_record_field(field: &str, with_html: bool) -> Cow<str> {
     text
 }
 
-fn strip_redundant_sections(text: &str) -> Cow<str> {
+fn strip_redundant_sections(text: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> = LazyLock::new(|| {
         Regex::new(
             r"(?isx)
@@ -169,7 +169,7 @@ fn strip_redundant_sections(text: &str) -> Cow<str> {
     RE.replace_all(text.as_ref(), "")
 }
 
-fn strip_answer_side_question(text: &str) -> Cow<str> {
+fn strip_answer_side_question(text: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> =
         LazyLock::new(|| Regex::new(r"(?is)^.*<hr id=answer>\n*").unwrap());
     RE.replace_all(text.as_ref(), "")
@@ -251,7 +251,7 @@ impl NoteContext {
             .chain(self.tags(note))
     }
 
-    fn notetype_name(&self, note: &Note) -> Option<Cow<[u8]>> {
+    fn notetype_name(&self, note: &Note) -> Option<Cow<'_, [u8]>> {
         self.with_notetype.then(|| {
             self.notetypes
                 .get(&note.notetype_id)
@@ -259,7 +259,7 @@ impl NoteContext {
         })
     }
 
-    fn deck_name(&self, note: &Note) -> Option<Cow<[u8]>> {
+    fn deck_name(&self, note: &Note) -> Option<Cow<'_, [u8]>> {
         self.with_deck.then(|| {
             self.deck_ids
                 .get(&note.id)
@@ -268,7 +268,7 @@ impl NoteContext {
         })
     }
 
-    fn tags(&self, note: &Note) -> Option<Cow<[u8]>> {
+    fn tags(&self, note: &Note) -> Option<Cow<'_, [u8]>> {
         self.with_tags
             .then(|| Cow::from(note.tags.join(" ").into_bytes()))
     }


@@ -511,7 +511,7 @@ impl NoteContext<'_> {
 }
 
 impl Note {
-    fn first_field_stripped(&self) -> Cow<str> {
+    fn first_field_stripped(&self) -> Cow<'_, str> {
         strip_html_preserving_media_filenames(&self.fields()[0])
     }
 }
@@ -623,7 +623,7 @@ impl ForeignNote {
             .all(|(opt, field)| opt.as_ref().map(|s| s == field).unwrap_or(true))
     }
 
-    fn first_field_stripped(&self) -> Option<Cow<str>> {
+    fn first_field_stripped(&self) -> Option<Cow<'_, str>> {
         self.fields
             .first()
             .and_then(|s| s.as_ref())


@@ -48,7 +48,7 @@ pub struct ExtractedLatex {
 pub(crate) fn extract_latex_expanding_clozes(
     text: &str,
     svg: bool,
-) -> (Cow<str>, Vec<ExtractedLatex>) {
+) -> (Cow<'_, str>, Vec<ExtractedLatex>) {
     if text.contains("{{c") {
         let expanded = expand_clozes_to_reveal_latex(text);
         let (text, extracts) = extract_latex(&expanded, svg);
@@ -60,7 +60,7 @@ pub(crate) fn extract_latex_expanding_clozes(
 
 /// Extract LaTeX from the provided text.
 /// Expects cloze deletions to already be expanded.
-pub fn extract_latex(text: &str, svg: bool) -> (Cow<str>, Vec<ExtractedLatex>) {
+pub fn extract_latex(text: &str, svg: bool) -> (Cow<'_, str>, Vec<ExtractedLatex>) {
     let mut extracted = vec![];
 
     let new_text = LATEX.replace_all(text, |caps: &Captures| {
@@ -84,7 +84,7 @@ pub fn extract_latex(text: &str, svg: bool) -> (Cow<str>, Vec<ExtractedLatex>) {
     (new_text, extracted)
 }
 
-fn strip_html_for_latex(html: &str) -> Cow<str> {
+fn strip_html_for_latex(html: &str) -> Cow<'_, str> {
     let mut out: Cow<str> = html.into();
     if let Cow::Owned(o) = LATEX_NEWLINES.replace_all(html, "\n") {
         out = o.into();


@@ -91,7 +91,7 @@ fn nonbreaking_space(char: char) -> bool {
 /// - Any problem characters are removed.
 /// - Windows device names like CON and PRN have '_' appended
 /// - The filename is limited to 120 bytes.
-pub(crate) fn normalize_filename(fname: &str) -> Cow<str> {
+pub(crate) fn normalize_filename(fname: &str) -> Cow<'_, str> {
     let mut output = Cow::Borrowed(fname);
 
     if !is_nfc(output.as_ref()) {
@@ -102,7 +102,7 @@ pub(crate) fn normalize_filename(fname: &str) -> Cow<str> {
 }
 
 /// See normalize_filename(). This function expects NFC-normalized input.
-pub(crate) fn normalize_nfc_filename(mut fname: Cow<str>) -> Cow<str> {
+pub(crate) fn normalize_nfc_filename(mut fname: Cow<'_, str>) -> Cow<'_, str> {
     if fname.contains(disallowed_char) {
         fname = fname.replace(disallowed_char, "").into()
     }
@@ -137,7 +137,7 @@ pub(crate) fn normalize_nfc_filename(mut fname: Cow<str>) -> Cow<str> {
 /// but can be accessed as NFC. On these devices, if the filename
 /// is otherwise valid, the filename is returned as NFC.
 #[allow(clippy::collapsible_else_if)]
-pub(crate) fn filename_if_normalized(fname: &str) -> Option<Cow<str>> {
+pub(crate) fn filename_if_normalized(fname: &str) -> Option<Cow<'_, str>> {
     if cfg!(target_vendor = "apple") {
         if !is_nfc(fname) {
             let as_nfc = fname.chars().nfc().collect::<String>();
@@ -208,7 +208,7 @@ pub(crate) fn add_hash_suffix_to_file_stem(fname: &str, hash: &Sha1Hash) -> Stri
 }
 
 /// If filename is longer than max_bytes, truncate it.
-fn truncate_filename(fname: &str, max_bytes: usize) -> Cow<str> {
+fn truncate_filename(fname: &str, max_bytes: usize) -> Cow<'_, str> {
     if fname.len() <= max_bytes {
         return Cow::Borrowed(fname);
     }


@@ -25,7 +25,7 @@ pub struct RenderCardOutput {
 
 impl RenderCardOutput {
     /// The question text. This is only valid to call when partial_render=false.
-    pub fn question(&self) -> Cow<str> {
+    pub fn question(&self) -> Cow<'_, str> {
         match self.qnodes.as_slice() {
             [RenderedNode::Text { text }] => text.into(),
             _ => "not fully rendered".into(),
@@ -33,7 +33,7 @@ impl RenderCardOutput {
     }
 
     /// The answer text. This is only valid to call when partial_render=false.
-    pub fn answer(&self) -> Cow<str> {
+    pub fn answer(&self) -> Cow<'_, str> {
         match self.anodes.as_slice() {
             [RenderedNode::Text { text }] => text.into(),
             _ => "not fully rendered".into(),


@@ -174,7 +174,7 @@ impl LoadBalancer {
         &self,
         note_id: Option<NoteId>,
         deckconfig_id: DeckConfigId,
-    ) -> LoadBalancerContext {
+    ) -> LoadBalancerContext<'_> {
         LoadBalancerContext {
             load_balancer: self,
             note_id,


@@ -226,7 +226,7 @@ impl Collection {
         &mut self,
         search: impl TryIntoSearch,
         mode: SortMode,
-    ) -> Result<CardTableGuard> {
+    ) -> Result<CardTableGuard<'_>> {
         let top_node = search.try_into_search()?;
         let writer = SqlWriter::new(self, ReturnItemType::Cards);
         let want_order = mode != SortMode::NoOrder;
@@ -299,7 +299,7 @@ impl Collection {
     pub(crate) fn search_notes_into_table(
         &mut self,
         search: impl TryIntoSearch,
-    ) -> Result<NoteTableGuard> {
+    ) -> Result<NoteTableGuard<'_>> {
         let top_node = search.try_into_search()?;
         let writer = SqlWriter::new(self, ReturnItemType::Notes);
         let mode = SortMode::NoOrder;
@@ -320,7 +320,7 @@ impl Collection {
 
     /// Place the ids of cards with notes in 'search_nids' into 'search_cids'.
     /// Returns number of added cards.
-    pub(crate) fn search_cards_of_notes_into_table(&mut self) -> Result<CardTableGuard> {
+    pub(crate) fn search_cards_of_notes_into_table(&mut self) -> Result<CardTableGuard<'_>> {
         self.storage.setup_searched_cards_table()?;
         let cards = self.storage.search_cards_of_notes_into_table()?;
         Ok(CardTableGuard { cards, col: self })


@@ -158,7 +158,7 @@ pub fn parse(input: &str) -> Result<Vec<Node>> {
 
 /// Zero or more nodes inside brackets, eg 'one OR two -three'.
 /// Empty vec must be handled by caller.
-fn group_inner(input: &str) -> IResult<Vec<Node>> {
+fn group_inner(input: &str) -> IResult<'_, Vec<Node>> {
     let mut remaining = input;
     let mut nodes = vec![];
 
@@ -203,16 +203,16 @@ fn group_inner(input: &str) -> IResult<Vec<Node>> {
     Ok((remaining, nodes))
 }
 
-fn whitespace0(s: &str) -> IResult<Vec<char>> {
+fn whitespace0(s: &str) -> IResult<'_, Vec<char>> {
     many0(one_of(" \u{3000}")).parse(s)
 }
 
 /// Optional leading space, then a (negated) group or text
-fn node(s: &str) -> IResult<Node> {
+fn node(s: &str) -> IResult<'_, Node> {
     preceded(whitespace0, alt((negated_node, group, text))).parse(s)
 }
 
-fn negated_node(s: &str) -> IResult<Node> {
+fn negated_node(s: &str) -> IResult<'_, Node> {
     map(preceded(char('-'), alt((group, text))), |node| {
         Node::Not(Box::new(node))
     })
@@ -220,7 +220,7 @@ fn negated_node(s: &str) -> IResult<Node> {
 }
 
 /// One or more nodes surrounded by brackets, eg (one OR two)
-fn group(s: &str) -> IResult<Node> {
+fn group(s: &str) -> IResult<'_, Node> {
     let (opened, _) = char('(')(s)?;
     let (tail, inner) = group_inner(opened)?;
     if let Some(remaining) = tail.strip_prefix(')') {
@@ -235,18 +235,18 @@ fn group(s: &str) -> IResult<Node> {
 }
 
 /// Either quoted or unquoted text
-fn text(s: &str) -> IResult<Node> {
+fn text(s: &str) -> IResult<'_, Node> {
     alt((quoted_term, partially_quoted_term, unquoted_term)).parse(s)
 }
 
 /// Quoted text, including the outer double quotes.
-fn quoted_term(s: &str) -> IResult<Node> {
+fn quoted_term(s: &str) -> IResult<'_, Node> {
     let (remaining, term) = quoted_term_str(s)?;
     Ok((remaining, Node::Search(search_node_for_text(term)?)))
 }
 
 /// eg deck:"foo bar" - quotes must come after the :
-fn partially_quoted_term(s: &str) -> IResult<Node> {
+fn partially_quoted_term(s: &str) -> IResult<'_, Node> {
     let (remaining, (key, val)) = separated_pair(
         escaped(is_not("\"(): \u{3000}\\"), '\\', none_of(" \u{3000}")),
         char(':'),
@@ -260,7 +260,7 @@ fn partially_quoted_term(s: &str) -> IResult<Node> {
 }
 
 /// Unquoted text, terminated by whitespace or unescaped ", ( or )
-fn unquoted_term(s: &str) -> IResult<Node> {
+fn unquoted_term(s: &str) -> IResult<'_, Node> {
     match escaped(is_not("\"() \u{3000}\\"), '\\', none_of(" \u{3000}"))(s) {
         Ok((tail, term)) => {
             if term.is_empty() {
@@ -297,7 +297,7 @@ fn unquoted_term(s: &str) -> IResult<Node> {
 }
 
 /// Non-empty string delimited by unescaped double quotes.
-fn quoted_term_str(s: &str) -> IResult<&str> {
+fn quoted_term_str(s: &str) -> IResult<'_, &str> {
     let (opened, _) = char('"')(s)?;
     if let Ok((tail, inner)) =
         escaped::<_, ParseError, _, _>(is_not(r#""\"#), '\\', anychar).parse(opened)
@@ -321,7 +321,7 @@ fn quoted_term_str(s: &str) -> IResult<&str> {
 
 /// Determine if text is a qualified search, and handle escaped chars.
 /// Expect well-formed input: unempty and no trailing \.
-fn search_node_for_text(s: &str) -> ParseResult<SearchNode> {
+fn search_node_for_text(s: &str) -> ParseResult<'_, SearchNode> {
     // leading : is only possible error for well-formed input
     let (tail, head) = verify(escaped(is_not(r":\"), '\\', anychar), |t: &str| {
         !t.is_empty()
@@ -369,7 +369,7 @@ fn search_node_for_text_with_argument<'a>(
     })
 }
 
-fn parse_tag(s: &str) -> ParseResult<SearchNode> {
+fn parse_tag(s: &str) -> ParseResult<'_, SearchNode> {
     Ok(if let Some(re) = s.strip_prefix("re:") {
         SearchNode::Tag {
             tag: unescape_quotes(re),
@@ -383,7 +383,7 @@ fn parse_tag(s: &str) -> ParseResult<SearchNode> {
     })
 }
 
-fn parse_template(s: &str) -> ParseResult<SearchNode> {
+fn parse_template(s: &str) -> ParseResult<'_, SearchNode> {
     Ok(SearchNode::CardTemplate(match s.parse::<u16>() {
         Ok(n) => TemplateKind::Ordinal(n.max(1) - 1),
         Err(_) => TemplateKind::Name(unescape(s)?),
@@ -391,7 +391,7 @@ fn parse_template(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// flag:0-7
-fn parse_flag(s: &str) -> ParseResult<SearchNode> {
+fn parse_flag(s: &str) -> ParseResult<'_, SearchNode> {
     if let Ok(flag) = s.parse::<u8>() {
         if flag > 7 {
             Err(parse_failure(s, FailKind::InvalidFlag))
@@ -404,7 +404,7 @@ fn parse_flag(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg resched:3
-fn parse_resched(s: &str) -> ParseResult<SearchNode> {
+fn parse_resched(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "resched:").map(|days| SearchNode::Rated {
         days,
         ease: RatingKind::ManualReschedule,
@@ -412,7 +412,7 @@ fn parse_resched(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg prop:ivl>3, prop:ease!=2.5
-fn parse_prop(prop_clause: &str) -> ParseResult<SearchNode> {
+fn parse_prop(prop_clause: &str) -> ParseResult<'_, SearchNode> {
     let (tail, prop) = alt((
         tag("ivl"),
         tag("due"),
@@ -580,23 +580,23 @@ fn parse_prop_rated<'a>(num: &str, context: &'a str) -> ParseResult<'a, Property
 }
 
 /// eg added:1
-fn parse_added(s: &str) -> ParseResult<SearchNode> {
+fn parse_added(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "added:").map(|n| SearchNode::AddedInDays(n.max(1)))
 }
 
 /// eg edited:1
-fn parse_edited(s: &str) -> ParseResult<SearchNode> {
+fn parse_edited(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "edited:").map(|n| SearchNode::EditedInDays(n.max(1)))
 }
 
 /// eg introduced:1
-fn parse_introduced(s: &str) -> ParseResult<SearchNode> {
+fn parse_introduced(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "introduced:").map(|n| SearchNode::IntroducedInDays(n.max(1)))
 }
 
 /// eg rated:3 or rated:10:2
 /// second arg must be between 1-4
-fn parse_rated(s: &str) -> ParseResult<SearchNode> {
+fn parse_rated(s: &str) -> ParseResult<'_, SearchNode> {
     let mut it = s.splitn(2, ':');
     let days = parse_u32(it.next().unwrap(), "rated:")?.max(1);
     let button = parse_answer_button(it.next(), s)?;
@@ -604,7 +604,7 @@ fn parse_rated(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg is:due
-fn parse_state(s: &str) -> ParseResult<SearchNode> {
+fn parse_state(s: &str) -> ParseResult<'_, SearchNode> {
     use StateKind::*;
     Ok(SearchNode::State(match s {
         "new" => New,
@@ -624,7 +624,7 @@ fn parse_state(s: &str) -> ParseResult<SearchNode> {
     }))
 }
 
-fn parse_mid(s: &str) -> ParseResult<SearchNode> {
+fn parse_mid(s: &str) -> ParseResult<'_, SearchNode> {
     parse_i64(s, "mid:").map(|n| SearchNode::NotetypeId(n.into()))
 }
 
@@ -646,7 +646,7 @@ fn check_id_list<'a>(s: &'a str, context: &str) -> ParseResult<'a, &'a str> {
 }
 
 /// eg dupe:1231,hello
-fn parse_dupe(s: &str) -> ParseResult<SearchNode> {
+fn parse_dupe(s: &str) -> ParseResult<'_, SearchNode> {
     let mut it = s.splitn(2, ',');
     let ntid = parse_i64(it.next().unwrap(), s)?;
     if let Some(text) = it.next() {
@@ -700,7 +700,7 @@ fn unescape_quotes_and_backslashes(s: &str) -> String {
 }
 
 /// Unescape chars with special meaning to the parser.
-fn unescape(txt: &str) -> ParseResult<String> {
+fn unescape(txt: &str) -> ParseResult<'_, String> {
     if let Some(seq) = invalid_escape_sequence(txt) {
         Err(parse_failure(
             txt,


@@ -155,7 +155,7 @@ fn invalid_char_for_tag(c: char) -> bool {
     c.is_ascii_control() || is_tag_separator(c)
 }
 
-fn normalized_tag_name_component(comp: &str) -> Cow<str> {
+fn normalized_tag_name_component(comp: &str) -> Cow<'_, str> {
     let mut out = normalize_to_nfc(comp);
     if out.contains(invalid_char_for_tag) {
         out = out.replace(invalid_char_for_tag, "").into();
@@ -170,7 +170,7 @@ fn normalized_tag_name_component(comp: &str) -> Cow<str> {
     }
 }
 
-pub(super) fn normalize_tag_name(name: &str) -> Result<Cow<str>> {
+pub(super) fn normalize_tag_name(name: &str) -> Result<Cow<'_, str>> {
     let normalized_name: Cow<str> = if name
         .split("::")
         .any(|comp| matches!(normalized_tag_name_component(comp), Cow::Owned(_)))


@@ -121,7 +121,7 @@ pub enum Token<'a> {
     CloseConditional(&'a str),
 }
 
-fn comment_token(s: &str) -> nom::IResult<&str, Token> {
+fn comment_token(s: &str) -> nom::IResult<&str, Token<'_>> {
     map(
         delimited(
             tag(COMMENT_START),
@@ -151,7 +151,7 @@ fn tokens(mut template: &str) -> impl Iterator<Item = TemplateResult<Token<'_>>>
 }
 
 /// classify handle based on leading character
-fn classify_handle(s: &str) -> Token {
+fn classify_handle(s: &str) -> Token<'_> {
     let start = s.trim_start_matches('{').trim();
     if start.len() < 2 {
         return Token::Replacement(start);


@@ -117,7 +117,7 @@ fn captured_sound(caps: &Captures) -> bool {
     caps.get(2).unwrap().as_str().starts_with("sound:")
 }
 
-fn kana_filter(text: &str) -> Cow<str> {
+fn kana_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {
@@ -130,7 +130,7 @@ fn kana_filter(text: &str) -> Cow<str> {
         .into()
 }
 
-fn kanji_filter(text: &str) -> Cow<str> {
+fn kanji_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {
@@ -143,7 +143,7 @@ fn kanji_filter(text: &str) -> Cow<str> {
         .into()
 }
 
-fn furigana_filter(text: &str) -> Cow<str> {
+fn furigana_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {


@@ -215,8 +215,8 @@ pub fn is_html(text: impl AsRef<str>) -> bool {
     HTML.is_match(text.as_ref())
 }
 
-pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<str> {
-    let (html_stripper, sound_rep): (fn(&str) -> Cow<str>, _) = if preserve_media_filenames {
+pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<'_, str> {
+    let (html_stripper, sound_rep): (fn(&str) -> Cow<'_, str>, _) = if preserve_media_filenames {
         (strip_html_preserving_media_filenames, "$1")
     } else {
         (strip_html, "")
@@ -229,15 +229,15 @@ pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<str>
         .trim()
 }
 
-pub fn strip_html(html: &str) -> Cow<str> {
+pub fn strip_html(html: &str) -> Cow<'_, str> {
     strip_html_preserving_entities(html).map_cow(decode_entities)
 }
 
-pub fn strip_html_preserving_entities(html: &str) -> Cow<str> {
+pub fn strip_html_preserving_entities(html: &str) -> Cow<'_, str> {
     HTML.replace_all(html, "")
 }
 
-pub fn decode_entities(html: &str) -> Cow<str> {
+pub fn decode_entities(html: &str) -> Cow<'_, str> {
     if html.contains('&') {
         match htmlescape::decode_html(html) {
             Ok(text) => text.replace('\u{a0}', " ").into(),
@@ -249,7 +249,7 @@ pub fn decode_entities(html: &str) -> Cow<str> {
     }
 }
 
-pub(crate) fn newlines_to_spaces(text: &str) -> Cow<str> {
+pub(crate) fn newlines_to_spaces(text: &str) -> Cow<'_, str> {
     if text.contains('\n') {
         text.replace('\n', " ").into()
    } else {
@@ -257,7 +257,7 @@ pub(crate) fn newlines_to_spaces(text: &str) -> Cow<str> {
    }
 }
 
-pub fn strip_html_for_tts(html: &str) -> Cow<str> {
+pub fn strip_html_for_tts(html: &str) -> Cow<'_, str> {
     HTML_LINEBREAK_TAGS
         .replace_all(html, " ")
         .map_cow(strip_html)
@@ -282,7 +282,7 @@ pub(crate) struct MediaRef<'a> {
     pub fname_decoded: Cow<'a, str>,
 }
 
-pub(crate) fn extract_media_refs(text: &str) -> Vec<MediaRef> {
+pub(crate) fn extract_media_refs(text: &str) -> Vec<MediaRef<'_>> {
     let mut out = vec![];
 
     for caps in HTML_MEDIA_TAGS.captures_iter(text) {
@@ -359,11 +359,11 @@ pub(crate) fn extract_underscored_references(text: &str) -> Vec<&str> {
 /// Returns the first matching group as a str. This is intended for regexes
 /// where exactly one group matches, and will panic for matches without matching
 /// groups.
-fn extract_match(caps: Captures) -> &str {
+fn extract_match(caps: Captures<'_>) -> &str {
     caps.iter().skip(1).find_map(|g| g).unwrap().as_str()
 }
 
-pub fn strip_html_preserving_media_filenames(html: &str) -> Cow<str> {
+pub fn strip_html_preserving_media_filenames(html: &str) -> Cow<'_, str> {
     HTML_MEDIA_TAGS
         .replace_all(html, r" ${1}${2}${3} ")
         .map_cow(strip_html)
@@ -385,7 +385,7 @@ pub(crate) fn sanitize_html_no_images(html: &str) -> String {
         .to_string()
 }
 
-pub(crate) fn normalize_to_nfc(s: &str) -> Cow<str> {
+pub(crate) fn normalize_to_nfc(s: &str) -> Cow<'_, str> {
     match is_nfc(s) {
         false => s.chars().nfc().collect::<String>().into(),
         true => s.into(),
@@ -429,7 +429,7 @@ static EXTRA_NO_COMBINING_REPLACEMENTS: phf::Map<char, &str> = phf::phf_map! {
 };
 
 /// Convert provided string to NFKD form and strip combining characters.
-pub(crate) fn without_combining(s: &str) -> Cow<str> {
+pub(crate) fn without_combining(s: &str) -> Cow<'_, str> {
     // if the string is already normalized
     if matches!(is_nfkd_quick(s.chars()), IsNormalized::Yes) {
         // and no combining characters found, return unchanged
@@ -472,7 +472,7 @@ pub(crate) fn is_glob(txt: &str) -> bool {
 }
 
 /// Convert to a RegEx respecting Anki wildcards.
-pub(crate) fn to_re(txt: &str) -> Cow<str> {
+pub(crate) fn to_re(txt: &str) -> Cow<'_, str> {
     to_custom_re(txt, ".")
 }
 
@@ -492,7 +492,7 @@ pub(crate) fn to_custom_re<'a>(txt: &'a str, wildcard: &str) -> Cow<'a, str> {
 }
 
 /// Convert to SQL respecting Anki wildcards.
-pub(crate) fn to_sql(txt: &str) -> Cow<str> {
+pub(crate) fn to_sql(txt: &str) -> Cow<'_, str> {
     // escape sequences and unescaped special characters which need conversion
     static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\\[\\*]|[*%]").unwrap());
     RE.replace_all(txt, |caps: &Captures| {
@@ -508,7 +508,7 @@ pub(crate) fn to_sql(txt: &str) -> Cow<str> {
 }
 
 /// Unescape everything.
-pub(crate) fn to_text(txt: &str) -> Cow<str> {
+pub(crate) fn to_text(txt: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\\(.)").unwrap());
     RE.replace_all(txt, "$1")
 }
@@ -561,14 +561,14 @@ const FRAGMENT_QUERY_UNION: &AsciiSet = &CONTROLS
     .add(b'#');
 
 /// IRI-encode unescaped local paths in HTML fragment.
-pub(crate) fn encode_iri_paths(unescaped_html: &str) -> Cow<str> {
+pub(crate) fn encode_iri_paths(unescaped_html: &str) -> Cow<'_, str> {
     transform_html_paths(unescaped_html, |fname| {
         utf8_percent_encode(fname, FRAGMENT_QUERY_UNION).into()
    })
 }
 
 /// URI-decode escaped local paths in HTML fragment.
-pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<str> {
+pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<'_, str> {
     transform_html_paths(escaped_html, |fname| {
         percent_decode_str(fname).decode_utf8_lossy()
     })
@@ -577,9 +577,9 @@ pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<str> {
 /// Apply a transform to local filename references in tags like IMG.
 /// Required at display time, as Anki unfortunately stores the references
 /// in unencoded form in the database.
-fn transform_html_paths<F>(html: &str, transform: F) -> Cow<str>
+fn transform_html_paths<F>(html: &str, transform: F) -> Cow<'_, str>
 where
-    F: Fn(&str) -> Cow<str>,
+    F: Fn(&str) -> Cow<'_, str>,
 {
     HTML_MEDIA_TAGS.replace_all(html, |caps: &Captures| {
         let fname = caps


@@ -49,7 +49,7 @@ pub fn compare_answer(expected: &str, typed: &str, combining: bool) -> String {
 trait DiffTrait {
     fn get_typed(&self) -> &[char];
     fn get_expected(&self) -> &[char];
-    fn get_expected_original(&self) -> Cow<str>;
+    fn get_expected_original(&self) -> Cow<'_, str>;
 
     fn new(expected: &str, typed: &str) -> Self;
@@ -136,7 +136,7 @@ fn render_tokens(tokens: &[DiffToken]) -> String {
 
 /// Prefixes a leading mark character with a non-breaking space to prevent
 /// it from joining the previous token.
-fn isolate_leading_mark(text: &str) -> Cow<str> {
+fn isolate_leading_mark(text: &str) -> Cow<'_, str> {
     if text
         .chars()
         .next()
@@ -161,7 +161,7 @@ impl DiffTrait for Diff {
     fn get_expected(&self) -> &[char] {
         &self.expected
     }
 
-    fn get_expected_original(&self) -> Cow<str> {
+    fn get_expected_original(&self) -> Cow<'_, str> {
         Cow::Owned(self.get_expected().iter().collect::<String>())
     }
@@ -191,7 +191,7 @@ impl DiffTrait for DiffNonCombining {
     fn get_expected(&self) -> &[char] {
         &self.base.expected
     }
 
-    fn get_expected_original(&self) -> Cow<str> {
+    fn get_expected_original(&self) -> Cow<'_, str> {
         Cow::Borrowed(&self.expected_original)
     }