diff --git a/Cargo.lock b/Cargo.lock
index 3779be49a..a16ee153c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -183,6 +183,7 @@ dependencies = [
  "itertools 0.14.0",
  "num-format",
  "phf 0.11.3",
+ "regex",
  "serde",
  "serde_json",
  "unic-langid",
diff --git a/build/ninja_gen/src/archives.rs b/build/ninja_gen/src/archives.rs
index 3f87d3ff5..3d2120b06 100644
--- a/build/ninja_gen/src/archives.rs
+++ b/build/ninja_gen/src/archives.rs
@@ -67,7 +67,7 @@ impl Platform {
 }
 
 /// Append .exe to path if on Windows.
-pub fn with_exe(path: &str) -> Cow<str> {
+pub fn with_exe(path: &str) -> Cow<'_, str> {
     if cfg!(windows) {
         format!("{path}.exe").into()
     } else {
diff --git a/build/ninja_gen/src/node.rs b/build/ninja_gen/src/node.rs
index b7b66225b..38baa8f62 100644
--- a/build/ninja_gen/src/node.rs
+++ b/build/ninja_gen/src/node.rs
@@ -98,7 +98,7 @@ impl BuildAction for YarnInstall<'_> {
     }
 }
 
-fn with_cmd_ext(bin: &str) -> Cow<str> {
+fn with_cmd_ext(bin: &str) -> Cow<'_, str> {
     if cfg!(windows) {
         format!("{bin}.cmd").into()
     } else {
diff --git a/rslib/i18n/Cargo.toml b/rslib/i18n/Cargo.toml
index 899680cc3..cce8bfe6f 100644
--- a/rslib/i18n/Cargo.toml
+++ b/rslib/i18n/Cargo.toml
@@ -22,6 +22,7 @@ inflections.workspace = true
 anki_io.workspace = true
 anyhow.workspace = true
 itertools.workspace = true
+regex.workspace = true
 
 [dependencies]
 fluent.workspace = true
diff --git a/rslib/i18n/src/generated.rs b/rslib/i18n/src/generated.rs
index f3526f79f..f3fa71ce8 100644
--- a/rslib/i18n/src/generated.rs
+++ b/rslib/i18n/src/generated.rs
@@ -4,6 +4,5 @@
 
 // Include auto-generated content
 #![allow(clippy::all)]
-#![allow(text_direction_codepoint_in_literal)]
 
 include!(concat!(env!("OUT_DIR"), "/strings.rs"));
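Note: the allow() removed in generated.rs above was only needed because the
generated strings.rs used to embed raw Unicode bidi-control codepoints
(U+202A..U+202E, U+2066..U+2069) inside string literals, which rustc flags via
the text_direction_codepoint_in_literal lint. The write_strings.rs change that
follows escapes those codepoints at generation time instead. A minimal
standalone sketch of the escaping idea (illustration only; not part of the
diff):

    fn main() {
        // U+202E (right-to-left override) embedded in a literal is what the
        // lint warns about; emitting it as \u{202e} keeps the file plain ASCII.
        let raw = "\u{202e}abc";
        let escaped: String = raw
            .chars()
            .map(|c| match c {
                '\u{202a}'..='\u{202e}' | '\u{2066}'..='\u{2069}' => {
                    format!("\\u{{{:04x}}}", c as u32) // same format the diff emits
                }
                _ => c.to_string(),
            })
            .collect();
        assert_eq!(escaped, "\\u{202e}abc");
    }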
{{", .unwrap(); for (module, contents) in modules { - writeln!(buf, r###" "{module}" => r##"{contents}"##,"###).unwrap(); + let escaped_contents = escape_unicode_control_chars(contents); + writeln!( + buf, + r###" "{module}" => r##"{escaped_contents}"##,"### + ) + .unwrap(); } buf.push_str("};\n"); } +fn escape_unicode_control_chars(input: &str) -> String { + use regex::Regex; + + static RE: std::sync::OnceLock = std::sync::OnceLock::new(); + let re = RE.get_or_init(|| Regex::new(r"[\u{202a}-\u{202e}\u{2066}-\u{2069}]").unwrap()); + + re.replace_all(input, |caps: ®ex::Captures| { + let c = caps.get(0).unwrap().as_str().chars().next().unwrap(); + format!("\\u{{{:04x}}}", c as u32) + }) + .into_owned() +} + fn lang_constant_name(lang: &str) -> String { lang.to_ascii_uppercase().replace('-', "_") } diff --git a/rslib/linkchecker/tests/links.rs b/rslib/linkchecker/tests/links.rs index 2f39fbe31..39201de78 100644 --- a/rslib/linkchecker/tests/links.rs +++ b/rslib/linkchecker/tests/links.rs @@ -42,14 +42,14 @@ enum CheckableUrl { } impl CheckableUrl { - fn url(&self) -> Cow { + fn url(&self) -> Cow<'_, str> { match *self { Self::HelpPage(page) => help_page_to_link(page).into(), Self::String(s) => s.into(), } } - fn anchor(&self) -> Cow { + fn anchor(&self) -> Cow<'_, str> { match *self { Self::HelpPage(page) => help_page_link_suffix(page).into(), Self::String(s) => s.split('#').next_back().unwrap_or_default().into(), diff --git a/rslib/src/backend/collection.rs b/rslib/src/backend/collection.rs index d9f7c6262..5cef74381 100644 --- a/rslib/src/backend/collection.rs +++ b/rslib/src/backend/collection.rs @@ -94,7 +94,7 @@ impl BackendCollectionService for Backend { } impl Backend { - pub(super) fn lock_open_collection(&self) -> Result>> { + pub(super) fn lock_open_collection(&self) -> Result>> { let guard = self.col.lock().unwrap(); guard .is_some() @@ -102,7 +102,7 @@ impl Backend { .ok_or(AnkiError::CollectionNotOpen) } - pub(super) fn lock_closed_collection(&self) -> Result>> { + pub(super) fn lock_closed_collection(&self) -> Result>> { let guard = self.col.lock().unwrap(); guard .is_none() diff --git a/rslib/src/card_rendering/mod.rs b/rslib/src/card_rendering/mod.rs index 3d61a4fe5..262f2a7c9 100644 --- a/rslib/src/card_rendering/mod.rs +++ b/rslib/src/card_rendering/mod.rs @@ -34,7 +34,7 @@ pub fn prettify_av_tags + AsRef>(txt: S) -> String { /// Parse `txt` into [CardNodes] and return the result, /// or [None] if it only contains text nodes. -fn nodes_or_text_only(txt: &str) -> Option { +fn nodes_or_text_only(txt: &str) -> Option> { let nodes = CardNodes::parse(txt); (!nodes.text_only).then_some(nodes) } diff --git a/rslib/src/card_rendering/parser.rs b/rslib/src/card_rendering/parser.rs index b124c069d..0ee66a9b1 100644 --- a/rslib/src/card_rendering/parser.rs +++ b/rslib/src/card_rendering/parser.rs @@ -103,13 +103,13 @@ fn is_not0<'parser, 'arr: 'parser, 's: 'parser>( move |s| alt((is_not(arr), success(""))).parse(s) } -fn node(s: &str) -> IResult { +fn node(s: &str) -> IResult<'_, Node<'_>> { alt((sound_node, tag_node, text_node)).parse(s) } /// A sound tag `[sound:resource]`, where `resource` is pointing to a sound or /// video file. 
diff --git a/rslib/src/card_rendering/parser.rs b/rslib/src/card_rendering/parser.rs
index b124c069d..0ee66a9b1 100644
--- a/rslib/src/card_rendering/parser.rs
+++ b/rslib/src/card_rendering/parser.rs
@@ -103,13 +103,13 @@ fn is_not0<'parser, 'arr: 'parser, 's: 'parser>(
     move |s| alt((is_not(arr), success(""))).parse(s)
 }
 
-fn node(s: &str) -> IResult<Node> {
+fn node(s: &str) -> IResult<'_, Node<'_>> {
     alt((sound_node, tag_node, text_node)).parse(s)
 }
 
 /// A sound tag `[sound:resource]`, where `resource` is pointing to a sound or
 /// video file.
-fn sound_node(s: &str) -> IResult<Node> {
+fn sound_node(s: &str) -> IResult<'_, Node<'_>> {
     map(
         delimited(tag("[sound:"), is_not("]"), tag("]")),
         Node::SoundOrVideo,
@@ -117,7 +117,7 @@ fn sound_node(s: &str) -> IResult<Node> {
     .parse(s)
 }
 
-fn take_till_potential_tag_start(s: &str) -> IResult<&str> {
+fn take_till_potential_tag_start(s: &str) -> IResult<'_, &str> {
     // first char could be '[', but wasn't part of a node, so skip (eof ends parse)
     let (after, offset) = anychar(s).map(|(s, c)| (s, c.len_utf8()))?;
     Ok(match after.find('[') {
@@ -127,9 +127,9 @@ fn take_till_potential_tag_start(s: &str) -> IResult<&str> {
 }
 
 /// An Anki tag `[anki:tag...]...[/anki:tag]`.
-fn tag_node(s: &str) -> IResult<Node> {
+fn tag_node(s: &str) -> IResult<'_, Node<'_>> {
     /// Match the start of an opening tag and return its name.
-    fn name(s: &str) -> IResult<&str> {
+    fn name(s: &str) -> IResult<'_, &str> {
         preceded(tag("[anki:"), is_not("] \t\r\n")).parse(s)
     }
 
@@ -139,12 +139,12 @@ fn tag_node(s: &str) -> IResult<Node> {
     ) -> impl FnMut(&'s str) -> IResult<'s, Vec<(&'s str, &'s str)>> + 'name {
         /// List of whitespace-separated `key=val` tuples, where `val` may be
         /// empty.
-        fn options(s: &str) -> IResult<Vec<(&str, &str)>> {
-            fn key(s: &str) -> IResult<&str> {
+        fn options(s: &str) -> IResult<'_, Vec<(&str, &str)>> {
+            fn key(s: &str) -> IResult<'_, &str> {
                 is_not("] \t\r\n=").parse(s)
             }
 
-            fn val(s: &str) -> IResult<&str> {
+            fn val(s: &str) -> IResult<'_, &str> {
                 alt((
                     delimited(tag("\""), is_not0("\""), tag("\"")),
                     is_not0("] \t\r\n\""),
@@ -197,7 +197,7 @@ fn tag_node(s: &str) -> IResult<Node> {
     .parse(s)
 }
 
-fn text_node(s: &str) -> IResult<Node> {
+fn text_node(s: &str) -> IResult<'_, Node<'_>> {
     map(take_till_potential_tag_start, Node::Text).parse(s)
 }
 
diff --git a/rslib/src/cloze.rs b/rslib/src/cloze.rs
index 02919dc12..027c14c0c 100644
--- a/rslib/src/cloze.rs
+++ b/rslib/src/cloze.rs
@@ -54,8 +54,8 @@ enum Token<'a> {
 }
 
 /// Tokenize string
-fn tokenize(mut text: &str) -> impl Iterator<Item = Token> {
-    fn open_cloze(text: &str) -> IResult<&str, Token> {
+fn tokenize(mut text: &str) -> impl Iterator<Item = Token<'_>> {
+    fn open_cloze(text: &str) -> IResult<&str, Token<'_>> {
         // opening brackets and 'c'
         let (text, _opening_brackets_and_c) = tag("{{c")(text)?;
         // following number
@@ -75,12 +75,12 @@ fn tokenize(mut text: &str) -> impl Iterator<Item = Token> {
         Ok((text, Token::OpenCloze(digits)))
     }
 
-    fn close_cloze(text: &str) -> IResult<&str, Token> {
+    fn close_cloze(text: &str) -> IResult<&str, Token<'_>> {
         map(tag("}}"), |_| Token::CloseCloze).parse(text)
     }
 
     /// Match a run of text until an open/close marker is encountered.
-    fn normal_text(text: &str) -> IResult<&str, Token> {
+    fn normal_text(text: &str) -> IResult<&str, Token<'_>> {
         if text.is_empty() {
             return Err(nom::Err::Error(nom::error::make_error(
                 text,
@@ -132,7 +132,7 @@ impl ExtractedCloze<'_> {
         self.hint.unwrap_or("...")
     }
 
-    fn clozed_text(&self) -> Cow<str> {
+    fn clozed_text(&self) -> Cow<'_, str> {
         // happy efficient path?
         if self.nodes.len() == 1 {
             if let TextOrCloze::Text(text) = self.nodes.last().unwrap() {
@@ -353,7 +353,7 @@ pub fn parse_image_occlusions(text: &str) -> Vec<ImageOcclusion> {
         .collect()
 }
 
-pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<str> {
+pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<'_, str> {
     let mut buf = String::new();
     let mut active_cloze_found_in_text = false;
     for node in &parse_text_with_clozes(text) {
@@ -376,7 +376,7 @@ pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<str>
     }
 }
 
-pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow<str> {
+pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow<'_, str> {
     let mut output = Vec::new();
     for node in &parse_text_with_clozes(text) {
         reveal_cloze_text_in_nodes(node, cloze_ord, question, &mut output);
@@ -384,7 +384,7 @@ pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow
     output.join(", ").into()
 }
 
-pub fn extract_cloze_for_typing(text: &str, cloze_ord: u16) -> Cow<str> {
+pub fn extract_cloze_for_typing(text: &str, cloze_ord: u16) -> Cow<'_, str> {
     let mut output = Vec::new();
     for node in &parse_text_with_clozes(text) {
         reveal_cloze_text_in_nodes(node, cloze_ord, false, &mut output);
@@ -460,7 +460,7 @@ pub(crate) fn strip_clozes(text: &str) -> Cow<'_, str> {
     CLOZE.replace_all(text, "$1")
 }
 
-fn strip_html_inside_mathjax(text: &str) -> Cow<str> {
+fn strip_html_inside_mathjax(text: &str) -> Cow<'_, str> {
     MATHJAX.replace_all(text, |caps: &Captures| -> String {
         format!(
             "{}{}{}",
diff --git a/rslib/src/decks/name.rs b/rslib/src/decks/name.rs
index 09fd2fe65..c7e79a782 100644
--- a/rslib/src/decks/name.rs
+++ b/rslib/src/decks/name.rs
@@ -191,7 +191,7 @@ fn invalid_char_for_deck_component(c: char) -> bool {
     c.is_ascii_control()
 }
 
-fn normalized_deck_name_component(comp: &str) -> Cow<str> {
+fn normalized_deck_name_component(comp: &str) -> Cow<'_, str> {
     let mut out = normalize_to_nfc(comp);
     if out.contains(invalid_char_for_deck_component) {
         out = out.replace(invalid_char_for_deck_component, "").into();
     }
diff --git a/rslib/src/import_export/gather.rs b/rslib/src/import_export/gather.rs
index 99e4babe2..7249e134a 100644
--- a/rslib/src/import_export/gather.rs
+++ b/rslib/src/import_export/gather.rs
@@ -231,7 +231,10 @@ fn svg_getter(notetypes: &[Notetype]) -> impl Fn(NotetypeId) -> bool {
 }
 
 impl Collection {
-    fn gather_notes(&mut self, search: impl TryIntoSearch) -> Result<(Vec<Note>, NoteTableGuard)> {
+    fn gather_notes(
+        &mut self,
+        search: impl TryIntoSearch,
+    ) -> Result<(Vec<Note>, NoteTableGuard<'_>)> {
         let guard = self.search_notes_into_table(search)?;
         guard
             .col
@@ -240,7 +243,7 @@ impl Collection {
             .map(|notes| (notes, guard))
     }
 
-    fn gather_cards(&mut self) -> Result<(Vec<Card>, CardTableGuard)> {
+    fn gather_cards(&mut self) -> Result<(Vec<Card>, CardTableGuard<'_>)> {
         let guard = self.search_cards_of_notes_into_table()?;
         guard
             .col
diff --git a/rslib/src/import_export/package/apkg/import/notes.rs b/rslib/src/import_export/package/apkg/import/notes.rs
index ba5178a18..ce4266289 100644
--- a/rslib/src/import_export/package/apkg/import/notes.rs
+++ b/rslib/src/import_export/package/apkg/import/notes.rs
@@ -664,7 +664,7 @@ mod test {
         self
     }
 
-    fn import(self, col: &mut Collection) -> NoteContext {
+    fn import(self, col: &mut Collection) -> NoteContext<'_> {
         let mut progress_handler = col.new_progress_handler();
         let media_map = Box::leak(Box::new(self.media_map));
         let mut ctx = NoteContext::new(
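Note: the guard types above (NoteTableGuard, CardTableGuard, and the
MutexGuard earlier) all borrow the value they were created from, so the '_ in
e.g. Result<NoteTableGuard<'_>> documents that the collection stays mutably
borrowed until the guard drops. A self-contained sketch of that pattern with
hypothetical Table/TableGuard types (not Anki's actual definitions):

    struct Table {
        rows: usize,
    }

    // Holds the &mut Table until dropped, then clears the temporary data.
    struct TableGuard<'a> {
        table: &'a mut Table,
    }

    impl Drop for TableGuard<'_> {
        fn drop(&mut self) {
            self.table.rows = 0;
        }
    }

    impl Table {
        // `TableGuard<'_>` makes the borrow of `self` visible in the signature.
        fn fill(&mut self, rows: usize) -> TableGuard<'_> {
            self.rows = rows;
            TableGuard { table: self }
        }
    }

    fn main() {
        let mut table = Table { rows: 0 };
        {
            let guard = table.fill(3);
            assert_eq!(guard.table.rows, 3);
        } // guard dropped: table cleared, borrow released
        assert_eq!(table.rows, 0);
    }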
diff --git a/rslib/src/import_export/package/media.rs b/rslib/src/import_export/package/media.rs
index ff5bdf4d7..8a7e5b726 100644
--- a/rslib/src/import_export/package/media.rs
+++ b/rslib/src/import_export/package/media.rs
@@ -154,7 +154,7 @@ pub(super) fn extract_media_entries(
     }
 }
 
-pub(super) fn safe_normalized_file_name(name: &str) -> Result<Cow<str>> {
+pub(super) fn safe_normalized_file_name(name: &str) -> Result<Cow<'_, str>> {
     if !filename_is_safe(name) {
         Err(AnkiError::ImportError {
             source: ImportError::Corrupt,
diff --git a/rslib/src/import_export/text/csv/export.rs b/rslib/src/import_export/text/csv/export.rs
index 885035b7e..1af1cfaa9 100644
--- a/rslib/src/import_export/text/csv/export.rs
+++ b/rslib/src/import_export/text/csv/export.rs
@@ -147,7 +147,7 @@ fn rendered_nodes_to_str(nodes: &[RenderedNode]) -> String {
         .join("")
 }
 
-fn field_to_record_field(field: &str, with_html: bool) -> Cow<str> {
+fn field_to_record_field(field: &str, with_html: bool) -> Cow<'_, str> {
     let mut text = strip_redundant_sections(field);
     if !with_html {
         text = text.map_cow(|t| html_to_text_line(t, false));
@@ -155,7 +155,7 @@ fn field_to_record_field(field: &str, with_html: bool) -> Cow<str> {
     text
 }
 
-fn strip_redundant_sections(text: &str) -> Cow<str> {
+fn strip_redundant_sections(text: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> = LazyLock::new(|| {
         Regex::new(
             r"(?isx)
@@ -169,7 +169,7 @@ fn strip_redundant_sections(text: &str) -> Cow<str> {
     RE.replace_all(text.as_ref(), "")
 }
 
-fn strip_answer_side_question(text: &str) -> Cow<str> {
+fn strip_answer_side_question(text: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> =
         LazyLock::new(|| Regex::new(r"(?is)^.*<hr id=answer>\n*").unwrap());
\n*").unwrap()); RE.replace_all(text.as_ref(), "") @@ -251,7 +251,7 @@ impl NoteContext { .chain(self.tags(note)) } - fn notetype_name(&self, note: &Note) -> Option> { + fn notetype_name(&self, note: &Note) -> Option> { self.with_notetype.then(|| { self.notetypes .get(¬e.notetype_id) @@ -259,7 +259,7 @@ impl NoteContext { }) } - fn deck_name(&self, note: &Note) -> Option> { + fn deck_name(&self, note: &Note) -> Option> { self.with_deck.then(|| { self.deck_ids .get(¬e.id) @@ -268,7 +268,7 @@ impl NoteContext { }) } - fn tags(&self, note: &Note) -> Option> { + fn tags(&self, note: &Note) -> Option> { self.with_tags .then(|| Cow::from(note.tags.join(" ").into_bytes())) } diff --git a/rslib/src/import_export/text/import.rs b/rslib/src/import_export/text/import.rs index 202189eb6..1e6f85b3f 100644 --- a/rslib/src/import_export/text/import.rs +++ b/rslib/src/import_export/text/import.rs @@ -511,7 +511,7 @@ impl NoteContext<'_> { } impl Note { - fn first_field_stripped(&self) -> Cow { + fn first_field_stripped(&self) -> Cow<'_, str> { strip_html_preserving_media_filenames(&self.fields()[0]) } } @@ -623,7 +623,7 @@ impl ForeignNote { .all(|(opt, field)| opt.as_ref().map(|s| s == field).unwrap_or(true)) } - fn first_field_stripped(&self) -> Option> { + fn first_field_stripped(&self) -> Option> { self.fields .first() .and_then(|s| s.as_ref()) diff --git a/rslib/src/latex.rs b/rslib/src/latex.rs index e5cb002ac..02056b721 100644 --- a/rslib/src/latex.rs +++ b/rslib/src/latex.rs @@ -48,7 +48,7 @@ pub struct ExtractedLatex { pub(crate) fn extract_latex_expanding_clozes( text: &str, svg: bool, -) -> (Cow, Vec) { +) -> (Cow<'_, str>, Vec) { if text.contains("{{c") { let expanded = expand_clozes_to_reveal_latex(text); let (text, extracts) = extract_latex(&expanded, svg); @@ -60,7 +60,7 @@ pub(crate) fn extract_latex_expanding_clozes( /// Extract LaTeX from the provided text. /// Expects cloze deletions to already be expanded. -pub fn extract_latex(text: &str, svg: bool) -> (Cow, Vec) { +pub fn extract_latex(text: &str, svg: bool) -> (Cow<'_, str>, Vec) { let mut extracted = vec![]; let new_text = LATEX.replace_all(text, |caps: &Captures| { @@ -84,7 +84,7 @@ pub fn extract_latex(text: &str, svg: bool) -> (Cow, Vec) { (new_text, extracted) } -fn strip_html_for_latex(html: &str) -> Cow { +fn strip_html_for_latex(html: &str) -> Cow<'_, str> { let mut out: Cow = html.into(); if let Cow::Owned(o) = LATEX_NEWLINES.replace_all(html, "\n") { out = o.into(); diff --git a/rslib/src/media/files.rs b/rslib/src/media/files.rs index 6974e2f81..ce17b40bb 100644 --- a/rslib/src/media/files.rs +++ b/rslib/src/media/files.rs @@ -91,7 +91,7 @@ fn nonbreaking_space(char: char) -> bool { /// - Any problem characters are removed. /// - Windows device names like CON and PRN have '_' appended /// - The filename is limited to 120 bytes. -pub(crate) fn normalize_filename(fname: &str) -> Cow { +pub(crate) fn normalize_filename(fname: &str) -> Cow<'_, str> { let mut output = Cow::Borrowed(fname); if !is_nfc(output.as_ref()) { @@ -102,7 +102,7 @@ pub(crate) fn normalize_filename(fname: &str) -> Cow { } /// See normalize_filename(). This function expects NFC-normalized input. -pub(crate) fn normalize_nfc_filename(mut fname: Cow) -> Cow { +pub(crate) fn normalize_nfc_filename(mut fname: Cow<'_, str>) -> Cow<'_, str> { if fname.contains(disallowed_char) { fname = fname.replace(disallowed_char, "").into() } @@ -137,7 +137,7 @@ pub(crate) fn normalize_nfc_filename(mut fname: Cow) -> Cow { /// but can be accessed as NFC. 
 /// is otherwise valid, the filename is returned as NFC.
 #[allow(clippy::collapsible_else_if)]
-pub(crate) fn filename_if_normalized(fname: &str) -> Option<Cow<str>> {
+pub(crate) fn filename_if_normalized(fname: &str) -> Option<Cow<'_, str>> {
     if cfg!(target_vendor = "apple") {
         if !is_nfc(fname) {
             let as_nfc = fname.chars().nfc().collect::<String>();
@@ -208,7 +208,7 @@ pub(crate) fn add_hash_suffix_to_file_stem(fname: &str, hash: &Sha1Hash) -> Stri
 }
 
 /// If filename is longer than max_bytes, truncate it.
-fn truncate_filename(fname: &str, max_bytes: usize) -> Cow<str> {
+fn truncate_filename(fname: &str, max_bytes: usize) -> Cow<'_, str> {
     if fname.len() <= max_bytes {
         return Cow::Borrowed(fname);
     }
diff --git a/rslib/src/notetype/render.rs b/rslib/src/notetype/render.rs
index 08c5677b0..19f5208dc 100644
--- a/rslib/src/notetype/render.rs
+++ b/rslib/src/notetype/render.rs
@@ -25,7 +25,7 @@ pub struct RenderCardOutput {
 
 impl RenderCardOutput {
     /// The question text. This is only valid to call when partial_render=false.
-    pub fn question(&self) -> Cow<str> {
+    pub fn question(&self) -> Cow<'_, str> {
         match self.qnodes.as_slice() {
             [RenderedNode::Text { text }] => text.into(),
             _ => "not fully rendered".into(),
@@ -33,7 +33,7 @@ impl RenderCardOutput {
     }
 
     /// The answer text. This is only valid to call when partial_render=false.
-    pub fn answer(&self) -> Cow<str> {
+    pub fn answer(&self) -> Cow<'_, str> {
         match self.anodes.as_slice() {
             [RenderedNode::Text { text }] => text.into(),
             _ => "not fully rendered".into(),
diff --git a/rslib/src/scheduler/states/load_balancer.rs b/rslib/src/scheduler/states/load_balancer.rs
index 20b6936df..8cb9e6a1c 100644
--- a/rslib/src/scheduler/states/load_balancer.rs
+++ b/rslib/src/scheduler/states/load_balancer.rs
@@ -174,7 +174,7 @@ impl LoadBalancer {
         &self,
         note_id: Option<NoteId>,
         deckconfig_id: DeckConfigId,
-    ) -> LoadBalancerContext {
+    ) -> LoadBalancerContext<'_> {
         LoadBalancerContext {
             load_balancer: self,
             note_id,
diff --git a/rslib/src/search/mod.rs b/rslib/src/search/mod.rs
index d42ea8323..0960fabf9 100644
--- a/rslib/src/search/mod.rs
+++ b/rslib/src/search/mod.rs
@@ -226,7 +226,7 @@ impl Collection {
         &mut self,
         search: impl TryIntoSearch,
         mode: SortMode,
-    ) -> Result<CardTableGuard> {
+    ) -> Result<CardTableGuard<'_>> {
         let top_node = search.try_into_search()?;
         let writer = SqlWriter::new(self, ReturnItemType::Cards);
         let want_order = mode != SortMode::NoOrder;
@@ -299,7 +299,7 @@ impl Collection {
     pub(crate) fn search_notes_into_table(
         &mut self,
         search: impl TryIntoSearch,
-    ) -> Result<NoteTableGuard> {
+    ) -> Result<NoteTableGuard<'_>> {
         let top_node = search.try_into_search()?;
         let writer = SqlWriter::new(self, ReturnItemType::Notes);
         let mode = SortMode::NoOrder;
@@ -320,7 +320,7 @@ impl Collection {
 
     /// Place the ids of cards with notes in 'search_nids' into 'search_cids'.
     /// Returns number of added cards.
-    pub(crate) fn search_cards_of_notes_into_table(&mut self) -> Result<CardTableGuard> {
+    pub(crate) fn search_cards_of_notes_into_table(&mut self) -> Result<CardTableGuard<'_>> {
         self.storage.setup_searched_cards_table()?;
         let cards = self.storage.search_cards_of_notes_into_table()?;
         Ok(CardTableGuard { cards, col: self })
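Note: in the parser hunks below, IResult is Anki's own alias rather than nom's
two-parameter one; it presumably bundles the input lifetime and error type
roughly like `type IResult<'s, O> = nom::IResult<&'s str, O, ParseError<'s>>;`,
which is why the fixed signatures read IResult<'_, Node>. The same idea with
nom's stock types, as a runnable sketch (illustration only):

    use nom::bytes::complete::tag;
    use nom::IResult;
    use nom::Parser;

    // The &str result borrows from `s`; nom's IResult spells both out.
    fn open_paren(s: &str) -> IResult<&str, &str> {
        tag("(").parse(s)
    }

    fn main() {
        assert_eq!(open_paren("(deck:foo)"), Ok(("deck:foo)", "(")));
    }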
diff --git a/rslib/src/search/parser.rs b/rslib/src/search/parser.rs
index ae166ef54..33c1a4622 100644
--- a/rslib/src/search/parser.rs
+++ b/rslib/src/search/parser.rs
@@ -158,7 +158,7 @@ pub fn parse(input: &str) -> Result<Vec<Node>> {
 
 /// Zero or more nodes inside brackets, eg 'one OR two -three'.
 /// Empty vec must be handled by caller.
-fn group_inner(input: &str) -> IResult<Vec<Node>> {
+fn group_inner(input: &str) -> IResult<'_, Vec<Node>> {
     let mut remaining = input;
     let mut nodes = vec![];
 
@@ -203,16 +203,16 @@ fn group_inner(input: &str) -> IResult<Vec<Node>> {
     Ok((remaining, nodes))
 }
 
-fn whitespace0(s: &str) -> IResult<Vec<char>> {
+fn whitespace0(s: &str) -> IResult<'_, Vec<char>> {
     many0(one_of(" \u{3000}")).parse(s)
 }
 
 /// Optional leading space, then a (negated) group or text
-fn node(s: &str) -> IResult<Node> {
+fn node(s: &str) -> IResult<'_, Node> {
     preceded(whitespace0, alt((negated_node, group, text))).parse(s)
 }
 
-fn negated_node(s: &str) -> IResult<Node> {
+fn negated_node(s: &str) -> IResult<'_, Node> {
     map(preceded(char('-'), alt((group, text))), |node| {
         Node::Not(Box::new(node))
     })
@@ -220,7 +220,7 @@ fn negated_node(s: &str) -> IResult<Node> {
     .parse(s)
 }
 
 /// One or more nodes surrounded by brackets, eg (one OR two)
-fn group(s: &str) -> IResult<Node> {
+fn group(s: &str) -> IResult<'_, Node> {
     let (opened, _) = char('(')(s)?;
     let (tail, inner) = group_inner(opened)?;
     if let Some(remaining) = tail.strip_prefix(')') {
@@ -235,18 +235,18 @@ fn group(s: &str) -> IResult<Node> {
 }
 
 /// Either quoted or unquoted text
-fn text(s: &str) -> IResult<Node> {
+fn text(s: &str) -> IResult<'_, Node> {
     alt((quoted_term, partially_quoted_term, unquoted_term)).parse(s)
 }
 
 /// Quoted text, including the outer double quotes.
-fn quoted_term(s: &str) -> IResult<Node> {
+fn quoted_term(s: &str) -> IResult<'_, Node> {
     let (remaining, term) = quoted_term_str(s)?;
     Ok((remaining, Node::Search(search_node_for_text(term)?)))
 }
 
 /// eg deck:"foo bar" - quotes must come after the :
-fn partially_quoted_term(s: &str) -> IResult<Node> {
+fn partially_quoted_term(s: &str) -> IResult<'_, Node> {
     let (remaining, (key, val)) = separated_pair(
         escaped(is_not("\"(): \u{3000}\\"), '\\', none_of(" \u{3000}")),
         char(':'),
@@ -260,7 +260,7 @@ fn partially_quoted_term(s: &str) -> IResult<Node> {
 }
 
 /// Unquoted text, terminated by whitespace or unescaped ", ( or )
-fn unquoted_term(s: &str) -> IResult<Node> {
+fn unquoted_term(s: &str) -> IResult<'_, Node> {
     match escaped(is_not("\"() \u{3000}\\"), '\\', none_of(" \u{3000}"))(s) {
         Ok((tail, term)) => {
             if term.is_empty() {
@@ -297,7 +297,7 @@ fn unquoted_term(s: &str) -> IResult<Node> {
 }
 
 /// Non-empty string delimited by unescaped double quotes.
-fn quoted_term_str(s: &str) -> IResult<&str> {
+fn quoted_term_str(s: &str) -> IResult<'_, &str> {
     let (opened, _) = char('"')(s)?;
     if let Ok((tail, inner)) =
         escaped::<_, ParseError, _, _>(is_not(r#""\"#), '\\', anychar).parse(opened)
@@ -321,7 +321,7 @@ fn quoted_term_str(s: &str) -> IResult<&str> {
 
 /// Determine if text is a qualified search, and handle escaped chars.
 /// Expect well-formed input: unempty and no trailing \.
-fn search_node_for_text(s: &str) -> ParseResult<SearchNode> {
+fn search_node_for_text(s: &str) -> ParseResult<'_, SearchNode> {
     // leading : is only possible error for well-formed input
     let (tail, head) = verify(escaped(is_not(r":\"), '\\', anychar), |t: &str| {
         !t.is_empty()
@@ -369,7 +369,7 @@ fn search_node_for_text_with_argument<'a>(
     })
 }
 
-fn parse_tag(s: &str) -> ParseResult<SearchNode> {
+fn parse_tag(s: &str) -> ParseResult<'_, SearchNode> {
     Ok(if let Some(re) = s.strip_prefix("re:") {
         SearchNode::Tag {
             tag: unescape_quotes(re),
@@ -383,7 +383,7 @@ fn parse_tag(s: &str) -> ParseResult<SearchNode> {
     })
 }
 
-fn parse_template(s: &str) -> ParseResult<SearchNode> {
+fn parse_template(s: &str) -> ParseResult<'_, SearchNode> {
     Ok(SearchNode::CardTemplate(match s.parse::<u16>() {
         Ok(n) => TemplateKind::Ordinal(n.max(1) - 1),
         Err(_) => TemplateKind::Name(unescape(s)?),
@@ -391,7 +391,7 @@ fn parse_template(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// flag:0-7
-fn parse_flag(s: &str) -> ParseResult<SearchNode> {
+fn parse_flag(s: &str) -> ParseResult<'_, SearchNode> {
     if let Ok(flag) = s.parse::<u8>() {
         if flag > 7 {
             Err(parse_failure(s, FailKind::InvalidFlag))
@@ -404,7 +404,7 @@ fn parse_flag(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg resched:3
-fn parse_resched(s: &str) -> ParseResult<SearchNode> {
+fn parse_resched(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "resched:").map(|days| SearchNode::Rated {
         days,
         ease: RatingKind::ManualReschedule,
@@ -412,7 +412,7 @@ fn parse_resched(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg prop:ivl>3, prop:ease!=2.5
-fn parse_prop(prop_clause: &str) -> ParseResult<SearchNode> {
+fn parse_prop(prop_clause: &str) -> ParseResult<'_, SearchNode> {
     let (tail, prop) = alt((
         tag("ivl"),
         tag("due"),
@@ -580,23 +580,23 @@ fn parse_prop_rated<'a>(num: &str, context: &'a str) -> ParseResult<'a, Property
 }
 
 /// eg added:1
-fn parse_added(s: &str) -> ParseResult<SearchNode> {
+fn parse_added(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "added:").map(|n| SearchNode::AddedInDays(n.max(1)))
 }
 
 /// eg edited:1
-fn parse_edited(s: &str) -> ParseResult<SearchNode> {
+fn parse_edited(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "edited:").map(|n| SearchNode::EditedInDays(n.max(1)))
 }
 
 /// eg introduced:1
-fn parse_introduced(s: &str) -> ParseResult<SearchNode> {
+fn parse_introduced(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "introduced:").map(|n| SearchNode::IntroducedInDays(n.max(1)))
 }
 
 /// eg rated:3 or rated:10:2
 /// second arg must be between 1-4
-fn parse_rated(s: &str) -> ParseResult<SearchNode> {
+fn parse_rated(s: &str) -> ParseResult<'_, SearchNode> {
     let mut it = s.splitn(2, ':');
     let days = parse_u32(it.next().unwrap(), "rated:")?.max(1);
     let button = parse_answer_button(it.next(), s)?;
@@ -604,7 +604,7 @@ fn parse_rated(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg is:due
-fn parse_state(s: &str) -> ParseResult<SearchNode> {
+fn parse_state(s: &str) -> ParseResult<'_, SearchNode> {
     use StateKind::*;
     Ok(SearchNode::State(match s {
         "new" => New,
@@ -624,7 +624,7 @@ fn parse_state(s: &str) -> ParseResult<SearchNode> {
     }))
 }
 
-fn parse_mid(s: &str) -> ParseResult<SearchNode> {
+fn parse_mid(s: &str) -> ParseResult<'_, SearchNode> {
     parse_i64(s, "mid:").map(|n| SearchNode::NotetypeId(n.into()))
 }
 
@@ -646,7 +646,7 @@ fn check_id_list<'a>(s: &'a str, context: &str) -> ParseResult<'a, &'a str> {
 }
 
 /// eg dupe:1231,hello
-fn parse_dupe(s: &str) -> ParseResult<SearchNode> {
+fn parse_dupe(s: &str) -> ParseResult<'_, SearchNode> {
     let mut it = s.splitn(2, ',');
     let ntid = parse_i64(it.next().unwrap(), s)?;
     if let Some(text) = it.next() {
@@ -700,7 +700,7 @@ fn unescape_quotes_and_backslashes(s: &str) -> String {
 }
 
 /// Unescape chars with special meaning to the parser.
-fn unescape(txt: &str) -> ParseResult<String> {
+fn unescape(txt: &str) -> ParseResult<'_, String> {
     if let Some(seq) = invalid_escape_sequence(txt) {
         Err(parse_failure(
             txt,
diff --git a/rslib/src/tags/register.rs b/rslib/src/tags/register.rs
index 9605ea92a..d02fa260a 100644
--- a/rslib/src/tags/register.rs
+++ b/rslib/src/tags/register.rs
@@ -155,7 +155,7 @@ fn invalid_char_for_tag(c: char) -> bool {
     c.is_ascii_control() || is_tag_separator(c)
 }
 
-fn normalized_tag_name_component(comp: &str) -> Cow<str> {
+fn normalized_tag_name_component(comp: &str) -> Cow<'_, str> {
     let mut out = normalize_to_nfc(comp);
     if out.contains(invalid_char_for_tag) {
         out = out.replace(invalid_char_for_tag, "").into();
@@ -170,7 +170,7 @@ fn normalized_tag_name_component(comp: &str) -> Cow<str> {
     }
 }
 
-pub(super) fn normalize_tag_name(name: &str) -> Result<Cow<str>> {
+pub(super) fn normalize_tag_name(name: &str) -> Result<Cow<'_, str>> {
     let normalized_name: Cow<str> = if name
         .split("::")
         .any(|comp| matches!(normalized_tag_name_component(comp), Cow::Owned(_)))
diff --git a/rslib/src/template.rs b/rslib/src/template.rs
index 4895cc162..aa84e0e7f 100644
--- a/rslib/src/template.rs
+++ b/rslib/src/template.rs
@@ -121,7 +121,7 @@ pub enum Token<'a> {
     CloseConditional(&'a str),
 }
 
-fn comment_token(s: &str) -> nom::IResult<&str, Token> {
+fn comment_token(s: &str) -> nom::IResult<&str, Token<'_>> {
     map(
         delimited(
             tag(COMMENT_START),
@@ -151,7 +151,7 @@ fn tokens(mut template: &str) -> impl Iterator<Item = TemplateResult<Token<'_>>>
 }
 
 /// classify handle based on leading character
-fn classify_handle(s: &str) -> Token {
+fn classify_handle(s: &str) -> Token<'_> {
     let start = s.trim_start_matches('{').trim();
     if start.len() < 2 {
         return Token::Replacement(start);
diff --git a/rslib/src/template_filters.rs b/rslib/src/template_filters.rs
index 4949e756d..66e9ecb37 100644
--- a/rslib/src/template_filters.rs
+++ b/rslib/src/template_filters.rs
@@ -117,7 +117,7 @@ fn captured_sound(caps: &Captures) -> bool {
     caps.get(2).unwrap().as_str().starts_with("sound:")
 }
 
-fn kana_filter(text: &str) -> Cow<str> {
+fn kana_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {
@@ -130,7 +130,7 @@ fn kana_filter(text: &str) -> Cow<str> {
     .into()
 }
 
-fn kanji_filter(text: &str) -> Cow<str> {
+fn kanji_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {
@@ -143,7 +143,7 @@ fn kanji_filter(text: &str) -> Cow<str> {
     .into()
 }
 
-fn furigana_filter(text: &str) -> Cow<str> {
+fn furigana_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {
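Note: map_cow, used by strip_html and strip_html_for_tts below, is an
Anki-internal helper whose definition is not part of this diff; it evidently
chains &str -> Cow transforms while keeping already-borrowed text borrowed. A
plausible standalone sketch of such a combinator (an assumption, not Anki's
actual code):

    use std::borrow::Cow;

    fn map_cow<'a>(
        input: Cow<'a, str>,
        f: impl for<'b> Fn(&'b str) -> Cow<'b, str>,
    ) -> Cow<'a, str> {
        match input {
            // Still borrowed: the transform may go on borrowing the original.
            Cow::Borrowed(s) => f(s),
            // Already owned: transform and re-own the result.
            Cow::Owned(s) => Cow::Owned(f(&s).into_owned()),
        }
    }

    fn strip_x(s: &str) -> Cow<'_, str> {
        if s.contains('x') {
            Cow::Owned(s.replace('x', ""))
        } else {
            Cow::Borrowed(s)
        }
    }

    fn main() {
        assert_eq!(map_cow(Cow::Borrowed("axbxc"), strip_x), "abc");
        // Unchanged input stays a cheap borrow end to end.
        assert!(matches!(map_cow(Cow::Borrowed("abc"), strip_x), Cow::Borrowed(_)));
    }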
diff --git a/rslib/src/text.rs b/rslib/src/text.rs
index 590c05b39..037366c28 100644
--- a/rslib/src/text.rs
+++ b/rslib/src/text.rs
@@ -215,8 +215,8 @@ pub fn is_html(text: impl AsRef<str>) -> bool {
     HTML.is_match(text.as_ref())
 }
 
-pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<str> {
-    let (html_stripper, sound_rep): (fn(&str) -> Cow<str>, _) = if preserve_media_filenames {
+pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<'_, str> {
+    let (html_stripper, sound_rep): (fn(&str) -> Cow<'_, str>, _) = if preserve_media_filenames {
         (strip_html_preserving_media_filenames, "$1")
     } else {
         (strip_html, "")
@@ -229,15 +229,15 @@ pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<str>
     .trim()
 }
 
-pub fn strip_html(html: &str) -> Cow<str> {
+pub fn strip_html(html: &str) -> Cow<'_, str> {
     strip_html_preserving_entities(html).map_cow(decode_entities)
 }
 
-pub fn strip_html_preserving_entities(html: &str) -> Cow<str> {
+pub fn strip_html_preserving_entities(html: &str) -> Cow<'_, str> {
     HTML.replace_all(html, "")
 }
 
-pub fn decode_entities(html: &str) -> Cow<str> {
+pub fn decode_entities(html: &str) -> Cow<'_, str> {
     if html.contains('&') {
         match htmlescape::decode_html(html) {
             Ok(text) => text.replace('\u{a0}', " ").into(),
@@ -249,7 +249,7 @@ pub fn decode_entities(html: &str) -> Cow<str> {
     }
 }
 
-pub(crate) fn newlines_to_spaces(text: &str) -> Cow<str> {
+pub(crate) fn newlines_to_spaces(text: &str) -> Cow<'_, str> {
     if text.contains('\n') {
         text.replace('\n', " ").into()
     } else {
@@ -257,7 +257,7 @@ pub(crate) fn newlines_to_spaces(text: &str) -> Cow<str> {
     }
 }
 
-pub fn strip_html_for_tts(html: &str) -> Cow<str> {
+pub fn strip_html_for_tts(html: &str) -> Cow<'_, str> {
     HTML_LINEBREAK_TAGS
         .replace_all(html, " ")
         .map_cow(strip_html)
@@ -282,7 +282,7 @@ pub(crate) struct MediaRef<'a> {
     pub fname_decoded: Cow<'a, str>,
 }
 
-pub(crate) fn extract_media_refs(text: &str) -> Vec<MediaRef> {
+pub(crate) fn extract_media_refs(text: &str) -> Vec<MediaRef<'_>> {
     let mut out = vec![];
 
     for caps in HTML_MEDIA_TAGS.captures_iter(text) {
@@ -359,11 +359,11 @@ pub(crate) fn extract_underscored_references(text: &str) -> Vec<&str> {
 /// Returns the first matching group as a str. This is intended for regexes
 /// where exactly one group matches, and will panic for matches without matching
 /// groups.
-fn extract_match(caps: Captures) -> &str {
+fn extract_match(caps: Captures<'_>) -> &str {
     caps.iter().skip(1).find_map(|g| g).unwrap().as_str()
 }
 
-pub fn strip_html_preserving_media_filenames(html: &str) -> Cow<str> {
+pub fn strip_html_preserving_media_filenames(html: &str) -> Cow<'_, str> {
     HTML_MEDIA_TAGS
         .replace_all(html, r" ${1}${2}${3} ")
         .map_cow(strip_html)
@@ -385,7 +385,7 @@ pub(crate) fn sanitize_html_no_images(html: &str) -> String {
         .to_string()
 }
 
-pub(crate) fn normalize_to_nfc(s: &str) -> Cow<str> {
+pub(crate) fn normalize_to_nfc(s: &str) -> Cow<'_, str> {
     match is_nfc(s) {
         false => s.chars().nfc().collect::<String>().into(),
         true => s.into(),
@@ -429,7 +429,7 @@ static EXTRA_NO_COMBINING_REPLACEMENTS: phf::Map<char, &str> = phf::phf_map! {
 };
 
 /// Convert provided string to NFKD form and strip combining characters.
-pub(crate) fn without_combining(s: &str) -> Cow<str> {
+pub(crate) fn without_combining(s: &str) -> Cow<'_, str> {
     // if the string is already normalized
     if matches!(is_nfkd_quick(s.chars()), IsNormalized::Yes) {
         // and no combining characters found, return unchanged
@@ -472,7 +472,7 @@ pub(crate) fn is_glob(txt: &str) -> bool {
 }
 
 /// Convert to a RegEx respecting Anki wildcards.
-pub(crate) fn to_re(txt: &str) -> Cow<str> {
+pub(crate) fn to_re(txt: &str) -> Cow<'_, str> {
     to_custom_re(txt, ".")
 }
 
@@ -492,7 +492,7 @@ pub(crate) fn to_custom_re<'a>(txt: &'a str, wildcard: &str) -> Cow<'a, str> {
 }
 
 /// Convert to SQL respecting Anki wildcards.
-pub(crate) fn to_sql(txt: &str) -> Cow<str> {
+pub(crate) fn to_sql(txt: &str) -> Cow<'_, str> {
     // escape sequences and unescaped special characters which need conversion
     static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\\[\\*]|[*%]").unwrap());
     RE.replace_all(txt, |caps: &Captures| {
@@ -508,7 +508,7 @@ pub(crate) fn to_sql(txt: &str) -> Cow<str> {
 }
 
 /// Unescape everything.
-pub(crate) fn to_text(txt: &str) -> Cow<str> {
+pub(crate) fn to_text(txt: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\\(.)").unwrap());
     RE.replace_all(txt, "$1")
 }
@@ -561,14 +561,14 @@ const FRAGMENT_QUERY_UNION: &AsciiSet = &CONTROLS
     .add(b'#');
 
 /// IRI-encode unescaped local paths in HTML fragment.
-pub(crate) fn encode_iri_paths(unescaped_html: &str) -> Cow<str> {
+pub(crate) fn encode_iri_paths(unescaped_html: &str) -> Cow<'_, str> {
     transform_html_paths(unescaped_html, |fname| {
         utf8_percent_encode(fname, FRAGMENT_QUERY_UNION).into()
     })
 }
 
 /// URI-decode escaped local paths in HTML fragment.
-pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<str> {
+pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<'_, str> {
     transform_html_paths(escaped_html, |fname| {
         percent_decode_str(fname).decode_utf8_lossy()
     })
@@ -577,9 +577,9 @@ pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<str> {
 /// Apply a transform to local filename references in tags like IMG.
 /// Required at display time, as Anki unfortunately stores the references
 /// in unencoded form in the database.
-fn transform_html_paths<F>(html: &str, transform: F) -> Cow<str>
+fn transform_html_paths<F>(html: &str, transform: F) -> Cow<'_, str>
 where
-    F: Fn(&str) -> Cow<str>,
+    F: Fn(&str) -> Cow<'_, str>,
 {
     HTML_MEDIA_TAGS.replace_all(html, |caps: &Captures| {
         let fname = caps
diff --git a/rslib/src/typeanswer.rs b/rslib/src/typeanswer.rs
index 1432ad50d..08c638e12 100644
--- a/rslib/src/typeanswer.rs
+++ b/rslib/src/typeanswer.rs
@@ -49,7 +49,7 @@ pub fn compare_answer(expected: &str, typed: &str, combining: bool) -> String {
 trait DiffTrait {
     fn get_typed(&self) -> &[char];
     fn get_expected(&self) -> &[char];
-    fn get_expected_original(&self) -> Cow<str>;
+    fn get_expected_original(&self) -> Cow<'_, str>;
 
     fn new(expected: &str, typed: &str) -> Self;
 
@@ -136,7 +136,7 @@ fn render_tokens(tokens: &[DiffToken]) -> String {
 
 /// Prefixes a leading mark character with a non-breaking space to prevent
 /// it from joining the previous token.
-fn isolate_leading_mark(text: &str) -> Cow<str> {
+fn isolate_leading_mark(text: &str) -> Cow<'_, str> {
     if text
         .chars()
         .next()
@@ -161,7 +161,7 @@ impl DiffTrait for Diff {
     fn get_expected(&self) -> &[char] {
         &self.expected
     }
-    fn get_expected_original(&self) -> Cow<str> {
+    fn get_expected_original(&self) -> Cow<'_, str> {
         Cow::Owned(self.get_expected().iter().collect::<String>())
     }
 
@@ -191,7 +191,7 @@ impl DiffTrait for DiffNonCombining {
     fn get_expected(&self) -> &[char] {
         &self.base.expected
     }
-    fn get_expected_original(&self) -> Cow<str> {
+    fn get_expected_original(&self) -> Cow<'_, str> {
         Cow::Borrowed(&self.expected_original)
     }