Merge branch 'main' into svelte-reviewer-bottom

Luc Mcgrady authored 2025-09-03 22:08:20 +01:00, committed by GitHub
commit 1ec9f4902e
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
64 changed files with 1086 additions and 827 deletions


@@ -238,6 +238,9 @@ Bradley Szoke <bradleyszoke@gmail.com>
 jcznk <https://github.com/jcznk>
 Thomas Rixen <thomas.rixen@student.uclouvain.be>
 Siyuan Mattuwu Yan <syan4@ualberta.ca>
+Lee Doughty <32392044+leedoughty@users.noreply.github.com>
+memchr <memchr@proton.me>
+Aldlss <ayaldlss@gmail.com>
 ********************

Cargo.lock (generated, 1062 changed lines)

File diff suppressed because it is too large.


@@ -133,7 +133,7 @@ tokio-util = { version = "0.7.15", features = ["io"] }
 tower-http = { version = "0.6.6", features = ["trace"] }
 tracing = { version = "0.1.41", features = ["max_level_trace", "release_max_level_debug"] }
 tracing-appender = "0.2.3"
-tracing-subscriber = { version = "0.3.19", features = ["fmt", "env-filter"] }
+tracing-subscriber = { version = "0.3.20", features = ["fmt", "env-filter"] }
 unic-langid = { version = "0.9.6", features = ["macros"] }
 unic-ucd-category = "0.9.0"
 unicode-normalization = "0.1.24"


@@ -49,6 +49,46 @@ pub trait BuildAction {
     }
 
     fn name(&self) -> &'static str {
-        std::any::type_name::<Self>().split("::").last().unwrap()
+        std::any::type_name::<Self>()
+            .split("::")
+            .last()
+            .unwrap()
+            .split('<')
+            .next()
+            .unwrap()
     }
 }
+
+#[cfg(test)]
+trait TestBuildAction {}
+
+#[cfg(test)]
+impl<T: TestBuildAction + ?Sized> BuildAction for T {
+    fn command(&self) -> &str {
+        "test"
+    }
+    fn files(&mut self, _build: &mut impl FilesHandle) {}
+}
+
+#[allow(dead_code, unused_variables)]
+#[test]
+fn should_strip_regions_in_type_name() {
+    struct Bare;
+    impl TestBuildAction for Bare {}
+    assert_eq!(Bare {}.name(), "Bare");
+
+    struct WithLifeTime<'a>(&'a str);
+    impl TestBuildAction for WithLifeTime<'_> {}
+    assert_eq!(WithLifeTime("test").name(), "WithLifeTime");
+
+    struct WithMultiLifeTime<'a, 'b>(&'a str, &'b str);
+    impl TestBuildAction for WithMultiLifeTime<'_, '_> {}
+    assert_eq!(
+        WithMultiLifeTime("test", "test").name(),
+        "WithMultiLifeTime"
+    );
+
+    struct WithGeneric<T>(T);
+    impl<T> TestBuildAction for WithGeneric<T> {}
+    assert_eq!(WithGeneric(3).name(), "WithGeneric");
+}
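The extra `.split('<')` is needed because `std::any::type_name` includes generic and lifetime arguments in its output, so the last `::` segment of a parameterised type still carries `<...>`. A minimal standalone sketch of that behaviour (names here are illustrative, not from the patch):

```rust
// Sketch: `type_name` yields e.g. "alloc::vec::Vec<i32>"; the last "::"
// segment is "Vec<i32>", so a further split on '<' recovers "Vec".
fn short_name<T: ?Sized>() -> &'static str {
    std::any::type_name::<T>()
        .split("::")
        .last()
        .unwrap()
        .split('<')
        .next()
        .unwrap()
}

struct WithGeneric<T>(T);

fn main() {
    assert_eq!(short_name::<Vec<i32>>(), "Vec");
    assert_eq!(short_name::<WithGeneric<u8>>(), "WithGeneric");
    assert_eq!(short_name::<str>(), "str");
}
```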


@@ -67,7 +67,7 @@ impl Platform {
     }
 
     /// Append .exe to path if on Windows.
-    pub fn with_exe(path: &str) -> Cow<str> {
+    pub fn with_exe(path: &str) -> Cow<'_, str> {
         if cfg!(windows) {
             format!("{path}.exe").into()
         } else {


@@ -98,7 +98,7 @@ impl BuildAction for YarnInstall<'_> {
     }
 }
 
-fn with_cmd_ext(bin: &str) -> Cow<str> {
+fn with_cmd_ext(bin: &str) -> Cow<'_, str> {
     if cfg!(windows) {
         format!("{bin}.cmd").into()
     } else {


@@ -599,6 +599,22 @@
     "name": "colored",
     "repository": "https://github.com/mackwic/colored"
   },
+  {
+    "authors": "Wim Looman <wim@nemo157.com>|Allen Bui <fairingrey@gmail.com>",
+    "description": "Adaptors for various compression algorithms.",
+    "license": "Apache-2.0 OR MIT",
+    "license_file": null,
+    "name": "compression-codecs",
+    "repository": "https://github.com/Nullus157/async-compression"
+  },
+  {
+    "authors": "Wim Looman <wim@nemo157.com>|Allen Bui <fairingrey@gmail.com>",
+    "description": "Abstractions for compression algorithms.",
+    "license": "Apache-2.0 OR MIT",
+    "license_file": null,
+    "name": "compression-core",
+    "repository": "https://github.com/Nullus157/async-compression"
+  },
   {
     "authors": "Stjepan Glavina <stjepang@gmail.com>|Taiki Endo <te316e89@gmail.com>|John Nunley <dev@notgull.net>",
     "description": "Concurrent multi-producer multi-consumer queue",
@@ -1759,6 +1775,14 @@
     "name": "http-body-util",
     "repository": "https://github.com/hyperium/http-body"
   },
+  {
+    "authors": null,
+    "description": "No-dep range header parser",
+    "license": "MIT",
+    "license_file": null,
+    "name": "http-range-header",
+    "repository": "https://github.com/MarcusGrass/parse-range-headers"
+  },
   {
     "authors": "Sean McArthur <sean@seanmonstar.com>",
     "description": "A tiny, safe, speedy, zero-copy HTTP/1.x parser.",
@@ -1943,6 +1967,14 @@
     "name": "intl_pluralrules",
     "repository": "https://github.com/zbraniecki/pluralrules"
   },
+  {
+    "authors": "quininer <quininer@live.com>",
+    "description": "The low-level `io_uring` userspace interface for Rust",
+    "license": "Apache-2.0 OR MIT",
+    "license_file": null,
+    "name": "io-uring",
+    "repository": "https://github.com/tokio-rs/io-uring"
+  },
   {
     "authors": "Kris Price <kris@krisprice.nz>",
     "description": "Provides types and useful methods for working with IPv4 and IPv6 network addresses, commonly called IP prefixes. The new `IpNet`, `Ipv4Net`, and `Ipv6Net` types build on the existing `IpAddr`, `Ipv4Addr`, and `Ipv6Addr` types already provided in Rust's standard library and align to their design to stay consistent. The module also provides useful traits that extend `Ipv4Addr` and `Ipv6Addr` with methods for `Add`, `Sub`, `BitAnd`, and `BitOr` operations. The module only uses stable feature so it is guaranteed to compile using the stable toolchain.",
@@ -2168,7 +2200,7 @@
     "repository": "https://github.com/servo/html5ever"
   },
   {
-    "authors": null,
+    "authors": "The html5ever Project Developers",
     "description": "Procedural macro for html5ever.",
     "license": "Apache-2.0 OR MIT",
     "license_file": null,
@@ -2567,14 +2599,6 @@
     "name": "ordered-float",
     "repository": "https://github.com/reem/rust-ordered-float"
   },
-  {
-    "authors": "Daniel Salvadori <danaugrs@gmail.com>",
-    "description": "Provides a macro to simplify operator overloading.",
-    "license": "MIT",
-    "license_file": null,
-    "name": "overload",
-    "repository": "https://github.com/danaugrs/overload"
-  },
   {
     "authors": "Stjepan Glavina <stjepang@gmail.com>|The Rust Project Developers",
     "description": "Thread parking and unparking",
@@ -3040,7 +3064,7 @@
     "repository": "https://github.com/bluss/rawpointer/"
   },
   {
-    "authors": "Niko Matsakis <niko@alum.mit.edu>|Josh Stone <cuviper@gmail.com>",
+    "authors": null,
     "description": "Simple work-stealing parallelism for Rust",
     "license": "Apache-2.0 OR MIT",
     "license_file": null,
@@ -3048,7 +3072,7 @@
     "repository": "https://github.com/rayon-rs/rayon"
   },
   {
-    "authors": "Niko Matsakis <niko@alum.mit.edu>|Josh Stone <cuviper@gmail.com>",
+    "authors": null,
     "description": "Core APIs for Rayon",
     "license": "Apache-2.0 OR MIT",
     "license_file": null,
@@ -3095,28 +3119,12 @@
     "name": "regex",
     "repository": "https://github.com/rust-lang/regex"
   },
-  {
-    "authors": "Andrew Gallant <jamslam@gmail.com>",
-    "description": "Automata construction and matching using regular expressions.",
-    "license": "MIT OR Unlicense",
-    "license_file": null,
-    "name": "regex-automata",
-    "repository": "https://github.com/BurntSushi/regex-automata"
-  },
   {
     "authors": "The Rust Project Developers|Andrew Gallant <jamslam@gmail.com>",
     "description": "Automata construction and matching using regular expressions.",
     "license": "Apache-2.0 OR MIT",
     "license_file": null,
     "name": "regex-automata",
-    "repository": "https://github.com/rust-lang/regex/tree/master/regex-automata"
-  },
-  {
-    "authors": "The Rust Project Developers",
-    "description": "A regular expression parser.",
-    "license": "Apache-2.0 OR MIT",
-    "license_file": null,
-    "name": "regex-syntax",
     "repository": "https://github.com/rust-lang/regex"
   },
   {
@@ -3125,7 +3133,7 @@
     "license": "Apache-2.0 OR MIT",
     "license_file": null,
     "name": "regex-syntax",
-    "repository": "https://github.com/rust-lang/regex/tree/master/regex-syntax"
+    "repository": "https://github.com/rust-lang/regex"
   },
   {
     "authors": "John-John Tedro <udoprog@tedro.se>",
@@ -3455,14 +3463,6 @@
     "name": "serde_repr",
     "repository": "https://github.com/dtolnay/serde-repr"
   },
-  {
-    "authors": null,
-    "description": "Serde-compatible spanned Value",
-    "license": "Apache-2.0 OR MIT",
-    "license_file": null,
-    "name": "serde_spanned",
-    "repository": "https://github.com/toml-rs/toml"
-  },
   {
     "authors": "Jacob Brown <kardeiz@gmail.com>",
     "description": "De/serialize structs with named fields as array of values",
@@ -3711,14 +3711,6 @@
     "name": "syn",
     "repository": "https://github.com/dtolnay/syn"
   },
-  {
-    "authors": "David Tolnay <dtolnay@gmail.com>",
-    "description": "Parser for Rust source code",
-    "license": "Apache-2.0 OR MIT",
-    "license_file": null,
-    "name": "syn",
-    "repository": "https://github.com/dtolnay/syn"
-  },
   {
     "authors": "Actyx AG <developer@actyx.io>",
     "description": "A tool for enlisting the compiler's help in proving the absence of concurrency",
@@ -3927,6 +3919,14 @@
     "name": "tokio-rustls",
     "repository": "https://github.com/rustls/tokio-rustls"
   },
+  {
+    "authors": "Daniel Abramov <dabramov@snapview.de>|Alexey Galakhov <agalakhov@snapview.de>",
+    "description": "Tokio binding for Tungstenite, the Lightweight stream-based WebSocket implementation",
+    "license": "MIT",
+    "license_file": null,
+    "name": "tokio-tungstenite",
+    "repository": "https://github.com/snapview/tokio-tungstenite"
+  },
   {
     "authors": "Tokio Contributors <team@tokio.rs>",
     "description": "Additional utilities for working with Tokio.",
@@ -3951,14 +3951,6 @@
     "name": "toml_edit",
     "repository": "https://github.com/toml-rs/toml"
   },
-  {
-    "authors": null,
-    "description": "A low-level interface for writing out TOML",
-    "license": "Apache-2.0 OR MIT",
-    "license_file": null,
-    "name": "toml_write",
-    "repository": "https://github.com/toml-rs/toml"
-  },
   {
     "authors": "Tower Maintainers <team@tower-rs.com>",
     "description": "Tower is a library of modular and reusable components for building robust clients and servers.",
@@ -4047,6 +4039,14 @@
     "name": "try-lock",
     "repository": "https://github.com/seanmonstar/try-lock"
   },
+  {
+    "authors": "Alexey Galakhov|Daniel Abramov",
+    "description": "Lightweight stream-based WebSocket implementation",
+    "license": "Apache-2.0 OR MIT",
+    "license_file": null,
+    "name": "tungstenite",
+    "repository": "https://github.com/snapview/tungstenite-rs"
+  },
   {
     "authors": "Jacob Brown <kardeiz@gmail.com>",
     "description": "Provides a typemap container with FxHashMap",
@@ -4920,11 +4920,11 @@
     "repository": "https://github.com/LukeMathWalker/wiremock-rs"
   },
   {
-    "authors": null,
-    "description": "Runtime support for the `wit-bindgen` crate",
+    "authors": "Alex Crichton <alex@alexcrichton.com>",
+    "description": "Rust bindings generator and runtime support for WIT and the component model. Used when compiling Rust programs to the component model.",
     "license": "Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT",
     "license_file": null,
-    "name": "wit-bindgen-rt",
+    "name": "wit-bindgen",
     "repository": "https://github.com/bytecodealliance/wit-bindgen"
   },
   {

@@ -1 +1 @@
-Subproject commit a599715d3c27ff2eb895c749f3534ab73d83dad1
+Subproject commit 5897ef3a4589c123b7fa4c7fbd67f84d0b7ee13e


@@ -384,8 +384,6 @@ deck-config-which-deck = Which deck would you like to display options for?
 deck-config-updating-cards = Updating cards: { $current_cards_count }/{ $total_cards_count }...
 deck-config-invalid-parameters = The provided FSRS parameters are invalid. Leave them blank to use the default parameters.
 deck-config-not-enough-history = Insufficient review history to perform this operation.
-deck-config-unable-to-determine-desired-retention =
-    Unable to determine a minimum recommended retention.
 deck-config-must-have-400-reviews =
     { $count ->
         [one] Only { $count } review was found.
@@ -394,7 +392,6 @@ deck-config-must-have-400-reviews =
 # Numbers that control how aggressively the FSRS algorithm schedules cards
 deck-config-weights = FSRS parameters
 deck-config-compute-optimal-weights = Optimize FSRS parameters
-deck-config-compute-minimum-recommended-retention = Minimum recommended retention
 deck-config-optimize-button = Optimize Current Preset
 # Indicates that a given function or label, provided via the "text" variable, operates slowly.
 deck-config-slow-suffix = { $text } (slow)
@@ -407,7 +404,6 @@ deck-config-historical-retention = Historical retention
 deck-config-smaller-is-better = Smaller numbers indicate a better fit to your review history.
 deck-config-steps-too-large-for-fsrs = When FSRS is enabled, steps of 1 day or more are not recommended.
 deck-config-get-params = Get Params
-deck-config-predicted-minimum-recommended-retention = Minimum recommended retention: { $num }
 deck-config-complete = { $num }% complete.
 deck-config-iterations = Iteration: { $count }...
 deck-config-reschedule-cards-on-change = Reschedule cards on change
@@ -468,12 +464,7 @@ deck-config-compute-optimal-weights-tooltip2 =
     By default, parameters will be calculated from the review history of all decks using the current preset. You can
     optionally adjust the search before calculating the parameters, if you'd like to alter which cards are used for
    optimizing the parameters.
-deck-config-compute-optimal-retention-tooltip4 =
-    This tool will attempt to find the desired retention value
-    that will lead to the most material learnt, in the least amount of time. The calculated number can serve as a reference
-    when deciding what to set your desired retention to. You may wish to choose a higher desired retention if you’re
-    willing to invest more study time to achieve it. Setting your desired retention lower than the minimum
-    is not recommended, as it will lead to a higher workload, because of the high forgetting rate.
 deck-config-please-save-your-changes-first = Please save your changes first.
 deck-config-workload-factor-change = Approximate workload: {$factor}x
     (compared to {$previousDR}% desired retention)
@@ -544,6 +535,16 @@ deck-config-fsrs-good-fit = Health Check:
 
 ## NO NEED TO TRANSLATE. This text is no longer used by Anki, and will be removed in the future.
 
+deck-config-unable-to-determine-desired-retention =
+    Unable to determine a minimum recommended retention.
+deck-config-predicted-minimum-recommended-retention = Minimum recommended retention: { $num }
+deck-config-compute-minimum-recommended-retention = Minimum recommended retention
+deck-config-compute-optimal-retention-tooltip4 =
+    This tool will attempt to find the desired retention value
+    that will lead to the most material learnt, in the least amount of time. The calculated number can serve as a reference
+    when deciding what to set your desired retention to. You may wish to choose a higher desired retention if you’re
+    willing to invest more study time to achieve it. Setting your desired retention lower than the minimum
+    is not recommended, as it will lead to a higher workload, because of the high forgetting rate.
 deck-config-plotted-on-x-axis = (Plotted on the X-axis)
 deck-config-a-100-day-interval =
     { $days ->

@@ -1 +1 @@
-Subproject commit bb4207f3b8e9a7c428db282d12c75b850be532f3
+Subproject commit dad4e2736a2b53dcdb52d79b5703dd464c05d666


@@ -82,6 +82,7 @@
   "resolutions": {
     "canvas": "npm:empty-npm-package@1.0.0",
     "cookie": "0.7.0",
+    "devalue": "^5.3.2",
    "vite": "6"
   },
   "browserslist": [
"browserslist": [ "browserslist": [


@@ -59,7 +59,7 @@ message AddNoteRequest {
 }
 
 message AddNoteResponse {
-  collection.OpChanges changes = 1;
+  collection.OpChangesWithCount changes = 1;
   int64 note_id = 2;
 }


@@ -528,7 +528,7 @@ class Collection(DeprecatedNamesMixin):
     def new_note(self, notetype: NotetypeDict) -> Note:
         return Note(self, notetype)
 
-    def add_note(self, note: Note, deck_id: DeckId) -> OpChanges:
+    def add_note(self, note: Note, deck_id: DeckId) -> OpChangesWithCount:
         hooks.note_will_be_added(self, note, deck_id)
         out = self._backend.add_note(note=note._to_backend_note(), deck_id=deck_id)
         note.id = NoteId(out.note_id)


@@ -175,8 +175,8 @@ class MnemoFact:
     def fact_view(self) -> type[MnemoFactView]:
         try:
             fact_view = self.cards[0].fact_view_id
-        except IndexError as err:
-            raise Exception(f"Fact {self.id} has no cards") from err
+        except IndexError:
+            return FrontOnly
 
         if fact_view.startswith("1.") or fact_view.startswith("1::"):
             return FrontOnly


@@ -8,7 +8,7 @@ from collections.abc import Callable
 import aqt.editor
 import aqt.forms
 from anki._legacy import deprecated
-from anki.collection import OpChanges, SearchNode
+from anki.collection import OpChanges, OpChangesWithCount, SearchNode
 from anki.decks import DeckId
 from anki.models import NotetypeId
 from anki.notes import Note, NoteFieldsCheckResult, NoteId
@@ -294,13 +294,13 @@ class AddCards(QMainWindow):
         target_deck_id = self.deck_chooser.selected_deck_id
 
-        def on_success(changes: OpChanges) -> None:
+        def on_success(changes: OpChangesWithCount) -> None:
             # only used for detecting changed sticky fields on close
             self._last_added_note = note
 
             self.addHistory(note)
-            tooltip(tr.adding_added(), period=500)
+            tooltip(tr.importing_cards_added(count=changes.count), period=500)
             av_player.stop_and_clear_queue()
             self._load_new_note(sticky_fields_from=note)
             gui_hooks.add_cards_did_add_note(note)


@@ -18,7 +18,7 @@ def add_note(
     parent: QWidget,
     note: Note,
     target_deck_id: DeckId,
-) -> CollectionOp[OpChanges]:
+) -> CollectionOp[OpChangesWithCount]:
     return CollectionOp(parent, lambda col: col.add_note(note, target_deck_id))


@@ -32,6 +32,7 @@ from aqt._macos_helper import macos_helper
 from aqt.mpv import MPV, MPVBase, MPVCommandError
 from aqt.qt import *
 from aqt.taskman import TaskManager
+from aqt.theme import theme_manager
 from aqt.utils import (
     disable_help_button,
     restoreGeom,
@@ -743,7 +744,7 @@ class RecordDialog(QDialog):
     def _setup_dialog(self) -> None:
         self.setWindowTitle("Anki")
         icon = QLabel()
-        qicon = QIcon("icons:media-record.svg")
+        qicon = theme_manager.icon_from_resources("icons:media-record.svg")
         icon.setPixmap(qicon.pixmap(60, 60))
         self.label = QLabel("...")
         hbox = QHBoxLayout()


@@ -115,7 +115,7 @@ class ThemeManager:
         # Workaround for Qt bug. First attempt was percent-escaping the chars,
         # but Qt can't handle that.
         # https://forum.qt.io/topic/55274/solved-qss-with-special-characters/11
-        path = re.sub(r"([\u00A1-\u00FF])", r"\\\1", path)
+        path = re.sub(r"(['\u00A1-\u00FF])", r"\\\1", path)
         return path
 
     def icon_from_resources(self, path: str | ColoredIcon) -> QIcon:


@@ -30,6 +30,12 @@ lipo -create \
     -output "$APP_LAUNCHER/Contents/MacOS/launcher"
 cp "$OUTPUT_DIR/uv" "$APP_LAUNCHER/Contents/MacOS/"
 
+# Build install_name_tool stub
+clang -arch arm64 -o "$OUTPUT_DIR/stub_arm64" stub.c
+clang -arch x86_64 -o "$OUTPUT_DIR/stub_x86_64" stub.c
+lipo -create "$OUTPUT_DIR/stub_arm64" "$OUTPUT_DIR/stub_x86_64" -output "$APP_LAUNCHER/Contents/MacOS/install_name_tool"
+rm "$OUTPUT_DIR/stub_arm64" "$OUTPUT_DIR/stub_x86_64"
+
 # Copy support files
 ANKI_VERSION=$(cat ../../../.version | tr -d '\n')
 sed "s/ANKI_VERSION/$ANKI_VERSION/g" Info.plist > "$APP_LAUNCHER/Contents/Info.plist"
@@ -40,7 +46,7 @@ cp ../versions.py "$APP_LAUNCHER/Contents/Resources/"
 
 # Codesign/bundle
 if [ -z "$NODMG" ]; then
-    for i in "$APP_LAUNCHER/Contents/MacOS/uv" "$APP_LAUNCHER/Contents/MacOS/launcher" "$APP_LAUNCHER"; do
+    for i in "$APP_LAUNCHER/Contents/MacOS/uv" "$APP_LAUNCHER/Contents/MacOS/install_name_tool" "$APP_LAUNCHER/Contents/MacOS/launcher" "$APP_LAUNCHER"; do
         codesign --force -vvvv -o runtime -s "Developer ID Application:" \
             --entitlements entitlements.python.xml \
             "$i"

qt/launcher/mac/stub.c (new file, 6 lines)

@@ -0,0 +1,6 @@
+// Copyright: Ankitects Pty Ltd and contributors
+// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+
+int main(void) {
+    return 0;
+}


@@ -51,6 +51,8 @@ struct State {
     previous_version: Option<String>,
     resources_dir: std::path::PathBuf,
     venv_folder: std::path::PathBuf,
+    /// system Python + PyQt6 library mode
+    system_qt: bool,
 }
 
 #[derive(Debug, Clone)]
@@ -88,9 +90,13 @@ fn main() {
 }
 
 fn run() -> Result<()> {
-    let uv_install_root = dirs::data_local_dir()
-        .context("Unable to determine data_dir")?
-        .join("AnkiProgramFiles");
+    let uv_install_root = if let Ok(custom_root) = std::env::var("ANKI_LAUNCHER_VENV_ROOT") {
+        std::path::PathBuf::from(custom_root)
+    } else {
+        dirs::data_local_dir()
+            .context("Unable to determine data_dir")?
+            .join("AnkiProgramFiles")
+    };
 
     let (exe_dir, resources_dir) = get_exe_and_resources_dirs()?;
@@ -113,6 +119,8 @@ fn run() -> Result<()> {
         mirror_path: uv_install_root.join("mirror"),
         pyproject_modified_by_user: false, // calculated later
         previous_version: None,
+        system_qt: (cfg!(unix) && !cfg!(target_os = "macos"))
+            && resources_dir.join("system_qt").exists(),
         resources_dir,
         venv_folder: uv_install_root.join(".venv"),
     };
@@ -193,8 +201,8 @@ fn extract_aqt_version(state: &State) -> Option<String> {
         return None;
     }
 
-    let output = Command::new(&state.uv_path)
-        .current_dir(&state.uv_install_root)
+    let output = uv_command(state)
+        .ok()?
         .env("VIRTUAL_ENV", &state.venv_folder)
         .args(["pip", "show", "aqt"])
         .output()
@@ -261,34 +269,45 @@ fn handle_version_install_or_update(state: &State, choice: MainMenuChoice) -> Re
         None
     };
 
-    let have_venv = state.venv_folder.exists();
-    if cfg!(target_os = "macos") && !have_developer_tools() && !have_venv {
-        println!("If you see a pop-up about 'install_name_tool', you can cancel it, and ignore the warning below.\n");
-    }
-
     // Prepare to sync the venv
-    let mut command = Command::new(&state.uv_path);
-    command.current_dir(&state.uv_install_root);
+    let mut command = uv_command(state)?;
 
-    // remove UV_* environment variables to avoid interference
-    for (key, _) in std::env::vars() {
-        if key.starts_with("UV_") || key == "VIRTUAL_ENV" {
-            command.env_remove(key);
-        }
-    }
-
-    // remove CONDA_PREFIX/bin from PATH to avoid conda interference
-    #[cfg(target_os = "macos")]
-    if let Ok(conda_prefix) = std::env::var("CONDA_PREFIX") {
-        if let Ok(current_path) = std::env::var("PATH") {
-            let conda_bin = format!("{conda_prefix}/bin");
-            let filtered_paths: Vec<&str> = current_path
-                .split(':')
-                .filter(|&path| path != conda_bin)
-                .collect();
-            let new_path = filtered_paths.join(":");
-            command.env("PATH", new_path);
-        }
-    }
+    if cfg!(target_os = "macos") {
+        // remove CONDA_PREFIX/bin from PATH to avoid conda interference
+        if let Ok(conda_prefix) = std::env::var("CONDA_PREFIX") {
+            if let Ok(current_path) = std::env::var("PATH") {
+                let conda_bin = format!("{conda_prefix}/bin");
+                let filtered_paths: Vec<&str> = current_path
+                    .split(':')
+                    .filter(|&path| path != conda_bin)
+                    .collect();
+                let new_path = filtered_paths.join(":");
+                command.env("PATH", new_path);
+            }
+        }
+        // put our fake install_name_tool at the top of the path to override
+        // potential conflicts
+        if let Ok(current_path) = std::env::var("PATH") {
+            let exe_dir = std::env::current_exe()
+                .ok()
+                .and_then(|exe| exe.parent().map(|p| p.to_path_buf()));
+            if let Some(exe_dir) = exe_dir {
+                let new_path = format!("{}:{}", exe_dir.display(), current_path);
+                command.env("PATH", new_path);
+            }
+        }
+    }
+
+    // Create venv with system site packages if system Qt is enabled
+    if state.system_qt {
+        let mut venv_command = uv_command(state)?;
+        venv_command.args([
+            "venv",
+            "--no-managed-python",
+            "--system-site-packages",
+            "--no-config",
+        ]);
+        venv_command.ensure_success()?;
+    }
 
     command
@@ -297,12 +316,18 @@ fn handle_version_install_or_update(state: &State, choice: MainMenuChoice) -> Re
         .env(
             "UV_HTTP_TIMEOUT",
            std::env::var("UV_HTTP_TIMEOUT").unwrap_or_else(|_| "180".to_string()),
-        )
-        .args(["sync", "--upgrade", "--managed-python", "--no-config"]);
+        );
+
+    command.args(["sync", "--upgrade", "--no-config"]);
+    if !state.system_qt {
+        command.arg("--managed-python");
+    }
 
-    // Add python version if .python-version file exists
+    // Add python version if .python-version file exists (but not for system Qt)
     if let Some(version) = &python_version_trimmed {
-        command.args(["--python", version]);
+        if !state.system_qt {
+            command.args(["--python", version]);
+        }
     }
 
     if state.no_cache_marker.exists() {
@@ -658,9 +683,8 @@ fn filter_and_normalize_versions(
 fn fetch_versions(state: &State) -> Result<Vec<String>> {
     let versions_script = state.resources_dir.join("versions.py");
 
-    let mut cmd = Command::new(&state.uv_path);
-    cmd.current_dir(&state.uv_install_root)
-        .args(["run", "--no-project", "--no-config", "--managed-python"])
+    let mut cmd = uv_command(state)?;
+    cmd.args(["run", "--no-project", "--no-config", "--managed-python"])
         .args(["--with", "pip-system-certs,requests[socks]"]);
 
     let python_version = read_file(&state.dist_python_version_path)?;
@@ -726,9 +750,20 @@ fn apply_version_kind(version_kind: &VersionKind, state: &State) -> Result<()> {
         ),
     };
 
-    // Add mirror configuration if enabled
-    let final_content = if let Some((python_mirror, pypi_mirror)) = get_mirror_urls(state)? {
-        format!("{updated_content}\n\n[[tool.uv.index]]\nname = \"mirror\"\nurl = \"{pypi_mirror}\"\ndefault = true\n\n[tool.uv]\npython-install-mirror = \"{python_mirror}\"\n")
+    let final_content = if state.system_qt {
+        format!(
+            concat!(
+                "{}\n\n[tool.uv]\n",
+                "override-dependencies = [\n",
+                "    \"pyqt6; sys_platform=='never'\",\n",
+                "    \"pyqt6-qt6; sys_platform=='never'\",\n",
+                "    \"pyqt6-webengine; sys_platform=='never'\",\n",
+                "    \"pyqt6-webengine-qt6; sys_platform=='never'\",\n",
+                "    \"pyqt6_sip; sys_platform=='never'\"\n",
+                "]\n"
+            ),
+            updated_content
+        )
     } else {
         updated_content
     };
@@ -925,12 +960,25 @@ fn handle_uninstall(state: &State) -> Result<bool> {
     Ok(true)
 }
 
-fn have_developer_tools() -> bool {
-    Command::new("xcode-select")
-        .args(["-p"])
-        .output()
-        .map(|output| output.status.success())
-        .unwrap_or(false)
+fn uv_command(state: &State) -> Result<Command> {
+    let mut command = Command::new(&state.uv_path);
+    command.current_dir(&state.uv_install_root);
+
+    // remove UV_* environment variables to avoid interference
+    for (key, _) in std::env::vars() {
+        if key.starts_with("UV_") || key == "VIRTUAL_ENV" {
+            command.env_remove(key);
+        }
+    }
+
+    // Add mirror environment variable if enabled
+    if let Some((python_mirror, pypi_mirror)) = get_mirror_urls(state)? {
+        command
+            .env("UV_PYTHON_INSTALL_MIRROR", &python_mirror)
+            .env("UV_DEFAULT_INDEX", &pypi_mirror);
+    }
+
+    Ok(command)
 }
 
 fn build_python_command(state: &State, args: &[String]) -> Result<Command> {


@@ -62,7 +62,7 @@ pub fn prepare_for_launch_after_update(mut cmd: Command, root: &Path) -> Result<
 pub fn relaunch_in_terminal() -> Result<()> {
     let current_exe = std::env::current_exe().context("Failed to get current executable path")?;
     Command::new("open")
-        .args(["-a", "Terminal"])
+        .args(["-na", "Terminal"])
         .arg(current_exe)
         .ensure_spawn()?;
     std::process::exit(0);


@@ -22,6 +22,7 @@ inflections.workspace = true
 anki_io.workspace = true
 anyhow.workspace = true
 itertools.workspace = true
+regex.workspace = true
 
 [dependencies]
 fluent.workspace = true


@@ -4,6 +4,5 @@
 
 // Include auto-generated content
 #![allow(clippy::all)]
-#![allow(text_direction_codepoint_in_literal)]
 
 include!(concat!(env!("OUT_DIR"), "/strings.rs"));


@@ -195,12 +195,30 @@ pub(crate) const {lang_name}: phf::Map<&str, &str> = phf::phf_map! {{",
     .unwrap();
 
     for (module, contents) in modules {
-        writeln!(buf, r###"    "{module}" => r##"{contents}"##,"###).unwrap();
+        let escaped_contents = escape_unicode_control_chars(contents);
+        writeln!(
+            buf,
+            r###"    "{module}" => r##"{escaped_contents}"##,"###
+        )
+        .unwrap();
     }
 
     buf.push_str("};\n");
 }
 
+fn escape_unicode_control_chars(input: &str) -> String {
+    use regex::Regex;
+
+    static RE: std::sync::OnceLock<Regex> = std::sync::OnceLock::new();
+    let re = RE.get_or_init(|| Regex::new(r"[\u{202a}-\u{202e}\u{2066}-\u{2069}]").unwrap());
+
+    re.replace_all(input, |caps: &regex::Captures| {
+        let c = caps.get(0).unwrap().as_str().chars().next().unwrap();
+        format!("\\u{{{:04x}}}", c as u32)
+    })
+    .into_owned()
+}
+
 fn lang_constant_name(lang: &str) -> String {
     lang.to_ascii_uppercase().replace('-', "_")
 }


@@ -42,14 +42,14 @@ enum CheckableUrl {
 }
 
 impl CheckableUrl {
-    fn url(&self) -> Cow<str> {
+    fn url(&self) -> Cow<'_, str> {
         match *self {
             Self::HelpPage(page) => help_page_to_link(page).into(),
             Self::String(s) => s.into(),
         }
     }
 
-    fn anchor(&self) -> Cow<str> {
+    fn anchor(&self) -> Cow<'_, str> {
         match *self {
             Self::HelpPage(page) => help_page_link_suffix(page).into(),
             Self::String(s) => s.split('#').next_back().unwrap_or_default().into(),


@@ -94,7 +94,7 @@ impl BackendCollectionService for Backend {
 }
 
 impl Backend {
-    pub(super) fn lock_open_collection(&self) -> Result<MutexGuard<Option<Collection>>> {
+    pub(super) fn lock_open_collection(&self) -> Result<MutexGuard<'_, Option<Collection>>> {
         let guard = self.col.lock().unwrap();
         guard
             .is_some()
@@ -102,7 +102,7 @@ impl Backend {
             .ok_or(AnkiError::CollectionNotOpen)
     }
 
-    pub(super) fn lock_closed_collection(&self) -> Result<MutexGuard<Option<Collection>>> {
+    pub(super) fn lock_closed_collection(&self) -> Result<MutexGuard<'_, Option<Collection>>> {
         let guard = self.col.lock().unwrap();
         guard
             .is_none()


@@ -34,7 +34,7 @@ pub fn prettify_av_tags<S: Into<String> + AsRef<str>>(txt: S) -> String {
 
 /// Parse `txt` into [CardNodes] and return the result,
 /// or [None] if it only contains text nodes.
-fn nodes_or_text_only(txt: &str) -> Option<CardNodes> {
+fn nodes_or_text_only(txt: &str) -> Option<CardNodes<'_>> {
     let nodes = CardNodes::parse(txt);
     (!nodes.text_only).then_some(nodes)
 }
} }


@@ -103,13 +103,13 @@ fn is_not0<'parser, 'arr: 'parser, 's: 'parser>(
     move |s| alt((is_not(arr), success(""))).parse(s)
 }
 
-fn node(s: &str) -> IResult<Node> {
+fn node(s: &str) -> IResult<'_, Node<'_>> {
     alt((sound_node, tag_node, text_node)).parse(s)
 }
 
 /// A sound tag `[sound:resource]`, where `resource` is pointing to a sound or
 /// video file.
-fn sound_node(s: &str) -> IResult<Node> {
+fn sound_node(s: &str) -> IResult<'_, Node<'_>> {
     map(
         delimited(tag("[sound:"), is_not("]"), tag("]")),
         Node::SoundOrVideo,
@@ -117,7 +117,7 @@ fn sound_node(s: &str) -> IResult<Node> {
     .parse(s)
 }
 
-fn take_till_potential_tag_start(s: &str) -> IResult<&str> {
+fn take_till_potential_tag_start(s: &str) -> IResult<'_, &str> {
     // first char could be '[', but wasn't part of a node, so skip (eof ends parse)
     let (after, offset) = anychar(s).map(|(s, c)| (s, c.len_utf8()))?;
     Ok(match after.find('[') {
@@ -127,9 +127,9 @@ fn take_till_potential_tag_start(s: &str) -> IResult<&str> {
 }
 
 /// An Anki tag `[anki:tag...]...[/anki:tag]`.
-fn tag_node(s: &str) -> IResult<Node> {
+fn tag_node(s: &str) -> IResult<'_, Node<'_>> {
     /// Match the start of an opening tag and return its name.
-    fn name(s: &str) -> IResult<&str> {
+    fn name(s: &str) -> IResult<'_, &str> {
         preceded(tag("[anki:"), is_not("] \t\r\n")).parse(s)
     }
 
@@ -139,12 +139,12 @@ fn tag_node(s: &str) -> IResult<Node> {
     ) -> impl FnMut(&'s str) -> IResult<'s, Vec<(&'s str, &'s str)>> + 'name {
         /// List of whitespace-separated `key=val` tuples, where `val` may be
         /// empty.
-        fn options(s: &str) -> IResult<Vec<(&str, &str)>> {
-            fn key(s: &str) -> IResult<&str> {
+        fn options(s: &str) -> IResult<'_, Vec<(&str, &str)>> {
+            fn key(s: &str) -> IResult<'_, &str> {
                 is_not("] \t\r\n=").parse(s)
             }
 
-            fn val(s: &str) -> IResult<&str> {
+            fn val(s: &str) -> IResult<'_, &str> {
                 alt((
                     delimited(tag("\""), is_not0("\""), tag("\"")),
                     is_not0("] \t\r\n\""),
@@ -197,7 +197,7 @@ fn tag_node(s: &str) -> IResult<Node> {
     .parse(s)
 }
 
-fn text_node(s: &str) -> IResult<Node> {
+fn text_node(s: &str) -> IResult<'_, Node<'_>> {
     map(take_till_potential_tag_start, Node::Text).parse(s)
 }


@@ -54,8 +54,8 @@ enum Token<'a> {
 }
 
 /// Tokenize string
-fn tokenize(mut text: &str) -> impl Iterator<Item = Token> {
-    fn open_cloze(text: &str) -> IResult<&str, Token> {
+fn tokenize(mut text: &str) -> impl Iterator<Item = Token<'_>> {
+    fn open_cloze(text: &str) -> IResult<&str, Token<'_>> {
         // opening brackets and 'c'
         let (text, _opening_brackets_and_c) = tag("{{c")(text)?;
         // following number
@@ -75,12 +75,12 @@ fn tokenize(mut text: &str) -> impl Iterator<Item = Token> {
         Ok((text, Token::OpenCloze(digits)))
     }
 
-    fn close_cloze(text: &str) -> IResult<&str, Token> {
+    fn close_cloze(text: &str) -> IResult<&str, Token<'_>> {
         map(tag("}}"), |_| Token::CloseCloze).parse(text)
     }
 
     /// Match a run of text until an open/close marker is encountered.
-    fn normal_text(text: &str) -> IResult<&str, Token> {
+    fn normal_text(text: &str) -> IResult<&str, Token<'_>> {
         if text.is_empty() {
             return Err(nom::Err::Error(nom::error::make_error(
                 text,
@@ -132,7 +132,7 @@ impl ExtractedCloze<'_> {
         self.hint.unwrap_or("...")
     }
 
-    fn clozed_text(&self) -> Cow<str> {
+    fn clozed_text(&self) -> Cow<'_, str> {
         // happy efficient path?
         if self.nodes.len() == 1 {
             if let TextOrCloze::Text(text) = self.nodes.last().unwrap() {
@@ -353,7 +353,7 @@ pub fn parse_image_occlusions(text: &str) -> Vec<ImageOcclusion> {
         .collect()
 }
 
-pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<str> {
+pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<'_, str> {
     let mut buf = String::new();
     let mut active_cloze_found_in_text = false;
     for node in &parse_text_with_clozes(text) {
@@ -376,7 +376,7 @@ pub fn reveal_cloze_text(text: &str, cloze_ord: u16, question: bool) -> Cow<str>
     }
 }
 
-pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow<str> {
+pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow<'_, str> {
     let mut output = Vec::new();
     for node in &parse_text_with_clozes(text) {
         reveal_cloze_text_in_nodes(node, cloze_ord, question, &mut output);
@@ -384,7 +384,7 @@ pub fn reveal_cloze_text_only(text: &str, cloze_ord: u16, question: bool) -> Cow
     output.join(", ").into()
 }
 
-pub fn extract_cloze_for_typing(text: &str, cloze_ord: u16) -> Cow<str> {
+pub fn extract_cloze_for_typing(text: &str, cloze_ord: u16) -> Cow<'_, str> {
     let mut output = Vec::new();
     for node in &parse_text_with_clozes(text) {
         reveal_cloze_text_in_nodes(node, cloze_ord, false, &mut output);
@@ -460,7 +460,7 @@ pub(crate) fn strip_clozes(text: &str) -> Cow<'_, str> {
     CLOZE.replace_all(text, "$1")
 }
 
-fn strip_html_inside_mathjax(text: &str) -> Cow<str> {
+fn strip_html_inside_mathjax(text: &str) -> Cow<'_, str> {
     MATHJAX.replace_all(text, |caps: &Captures| -> String {
         format!(
             "{}{}{}",


@@ -115,7 +115,7 @@ impl crate::services::DeckConfigService for Collection {
             .storage
             .get_revlog_entries_for_searched_cards_in_card_order()?;
 
-        let config = guard.col.get_optimal_retention_parameters(revlogs)?;
+        let mut config = guard.col.get_optimal_retention_parameters(revlogs)?;
         let cards = guard
             .col
             .storage
@@ -125,6 +125,8 @@ impl crate::services::DeckConfigService for Collection {
             .filter_map(|c| crate::card::Card::convert(c.clone(), days_elapsed, c.memory_state?))
             .collect::<Vec<fsrs::Card>>();
 
+        config.deck_size = guard.cards;
+
         let costs = (70u32..=99u32)
             .into_par_iter()
             .map(|dr| {


@@ -216,9 +216,6 @@ impl Collection {
         for deck in self.storage.get_all_decks()? {
             if let Ok(normal) = deck.normal() {
                 let deck_id = deck.id;
-                if let Some(desired_retention) = normal.desired_retention {
-                    deck_desired_retention.insert(deck_id, desired_retention);
-                }
                 // previous order & params
                 let previous_config_id = DeckConfigId(normal.config_id);
                 let previous_config = configs_before_update.get(&previous_config_id);
@@ -226,21 +223,23 @@ impl Collection {
                     .map(|c| c.inner.new_card_insert_order())
                     .unwrap_or_default();
                 let previous_params = previous_config.map(|c| c.fsrs_params());
-                let previous_retention = previous_config.map(|c| c.inner.desired_retention);
+                let previous_preset_dr = previous_config.map(|c| c.inner.desired_retention);
+                let previous_deck_dr = normal.desired_retention;
+                let previous_dr = previous_deck_dr.or(previous_preset_dr);
                 let previous_easy_days = previous_config.map(|c| &c.inner.easy_days_percentages);
 
                 // if a selected (sub)deck, or its old config was removed, update deck to point
                 // to new config
-                let current_config_id = if selected_deck_ids.contains(&deck.id)
+                let (current_config_id, current_deck_dr) = if selected_deck_ids.contains(&deck.id)
                     || !configs_after_update.contains_key(&previous_config_id)
                 {
                     let mut updated = deck.clone();
                     updated.normal_mut()?.config_id = selected_config.id.0;
                     update_deck_limits(updated.normal_mut()?, &req.limits, today);
                     self.update_deck_inner(&mut updated, deck, usn)?;
-                    selected_config.id
+                    (selected_config.id, updated.normal()?.desired_retention)
                 } else {
-                    previous_config_id
+                    (previous_config_id, previous_deck_dr)
                 };
 
                 // if new order differs, deck needs re-sorting
@@ -254,11 +253,12 @@ impl Collection {
 
                 // if params differ, memory state needs to be recomputed
                 let current_params = current_config.map(|c| c.fsrs_params());
-                let current_retention = current_config.map(|c| c.inner.desired_retention);
+                let current_preset_dr = current_config.map(|c| c.inner.desired_retention);
+                let current_dr = current_deck_dr.or(current_preset_dr);
                 let current_easy_days = current_config.map(|c| &c.inner.easy_days_percentages);
                 if fsrs_toggled
                     || previous_params != current_params
-                    || previous_retention != current_retention
+                    || previous_dr != current_dr
                     || (req.fsrs_reschedule && previous_easy_days != current_easy_days)
                 {
                     decks_needing_memory_recompute
@@ -266,7 +266,9 @@ impl Collection {
                         .or_default()
                        .push(deck_id);
                 }
-
+                if let Some(desired_retention) = current_deck_dr {
+                    deck_desired_retention.insert(deck_id, desired_retention);
+                }
                 self.adjust_remaining_steps_in_deck(deck_id, previous_config, current_config, usn)?;
             }
         }


@@ -191,7 +191,7 @@ fn invalid_char_for_deck_component(c: char) -> bool {
     c.is_ascii_control()
 }
 
-fn normalized_deck_name_component(comp: &str) -> Cow<str> {
+fn normalized_deck_name_component(comp: &str) -> Cow<'_, str> {
     let mut out = normalize_to_nfc(comp);
     if out.contains(invalid_char_for_deck_component) {
         out = out.replace(invalid_char_for_deck_component, "").into();


@@ -135,6 +135,8 @@ pub struct NormalDeckSchema11 {
     review_limit_today: Option<DayLimit>,
     #[serde(default, deserialize_with = "default_on_invalid")]
     new_limit_today: Option<DayLimit>,
+    #[serde(default, deserialize_with = "default_on_invalid")]
+    desired_retention: Option<u32>,
 }
 
 #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
@@ -249,6 +251,7 @@ impl Default for NormalDeckSchema11 {
             new_limit: None,
             review_limit_today: None,
            new_limit_today: None,
+            desired_retention: None,
         }
     }
 }
@@ -325,7 +328,7 @@ impl From<NormalDeckSchema11> for NormalDeck {
             new_limit: deck.new_limit,
             review_limit_today: deck.review_limit_today,
             new_limit_today: deck.new_limit_today,
-            desired_retention: None,
+            desired_retention: deck.desired_retention.map(|v| v as f32 / 100.0),
         }
     }
 }
@@ -367,6 +370,7 @@ impl From<Deck> for DeckSchema11 {
                 new_limit: norm.new_limit,
                 review_limit_today: norm.review_limit_today,
                 new_limit_today: norm.new_limit_today,
+                desired_retention: norm.desired_retention.map(|v| (v * 100.0) as u32),
                 common: deck.into(),
             }),
             DeckKind::Filtered(ref filt) => DeckSchema11::Filtered(FilteredDeckSchema11 {
@@ -431,7 +435,8 @@ static RESERVED_DECK_KEYS: Set<&'static str> = phf_set! {
     "browserCollapsed",
     "extendRev",
     "id",
-    "collapsed"
+    "collapsed",
+    "desiredRetention",
 };
 
 impl From<&Deck> for DeckTodaySchema11 {


@@ -231,7 +231,10 @@ fn svg_getter(notetypes: &[Notetype]) -> impl Fn(NotetypeId) -> bool {
 }
 
 impl Collection {
-    fn gather_notes(&mut self, search: impl TryIntoSearch) -> Result<(Vec<Note>, NoteTableGuard)> {
+    fn gather_notes(
+        &mut self,
+        search: impl TryIntoSearch,
+    ) -> Result<(Vec<Note>, NoteTableGuard<'_>)> {
         let guard = self.search_notes_into_table(search)?;
         guard
             .col
@@ -240,7 +243,7 @@ impl Collection {
             .map(|notes| (notes, guard))
     }
 
-    fn gather_cards(&mut self) -> Result<(Vec<Card>, CardTableGuard)> {
+    fn gather_cards(&mut self) -> Result<(Vec<Card>, CardTableGuard<'_>)> {
         let guard = self.search_cards_of_notes_into_table()?;
         guard
             .col


@@ -664,7 +664,7 @@ mod test {
             self
         }
 
-        fn import(self, col: &mut Collection) -> NoteContext {
+        fn import(self, col: &mut Collection) -> NoteContext<'_> {
             let mut progress_handler = col.new_progress_handler();
             let media_map = Box::leak(Box::new(self.media_map));
             let mut ctx = NoteContext::new(


@@ -154,7 +154,7 @@ pub(super) fn extract_media_entries(
     }
 }
 
-pub(super) fn safe_normalized_file_name(name: &str) -> Result<Cow<str>> {
+pub(super) fn safe_normalized_file_name(name: &str) -> Result<Cow<'_, str>> {
     if !filename_is_safe(name) {
         Err(AnkiError::ImportError {
             source: ImportError::Corrupt,


@@ -147,7 +147,7 @@ fn rendered_nodes_to_str(nodes: &[RenderedNode]) -> String {
         .join("")
 }
 
-fn field_to_record_field(field: &str, with_html: bool) -> Cow<str> {
+fn field_to_record_field(field: &str, with_html: bool) -> Cow<'_, str> {
     let mut text = strip_redundant_sections(field);
     if !with_html {
         text = text.map_cow(|t| html_to_text_line(t, false));
@@ -155,7 +155,7 @@ fn field_to_record_field(field: &str, with_html: bool) -> Cow<str> {
     text
 }
 
-fn strip_redundant_sections(text: &str) -> Cow<str> {
+fn strip_redundant_sections(text: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> = LazyLock::new(|| {
         Regex::new(
             r"(?isx)
@@ -169,7 +169,7 @@ fn strip_redundant_sections(text: &str) -> Cow<str> {
     RE.replace_all(text.as_ref(), "")
 }
 
-fn strip_answer_side_question(text: &str) -> Cow<str> {
+fn strip_answer_side_question(text: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> =
         LazyLock::new(|| Regex::new(r"(?is)^.*<hr id=answer>\n*").unwrap());
     RE.replace_all(text.as_ref(), "")
@@ -251,7 +251,7 @@ impl NoteContext {
             .chain(self.tags(note))
     }
 
-    fn notetype_name(&self, note: &Note) -> Option<Cow<[u8]>> {
+    fn notetype_name(&self, note: &Note) -> Option<Cow<'_, [u8]>> {
         self.with_notetype.then(|| {
             self.notetypes
                 .get(&note.notetype_id)
@@ -259,7 +259,7 @@ impl NoteContext {
         })
     }
 
-    fn deck_name(&self, note: &Note) -> Option<Cow<[u8]>> {
+    fn deck_name(&self, note: &Note) -> Option<Cow<'_, [u8]>> {
         self.with_deck.then(|| {
             self.deck_ids
                 .get(&note.id)
@@ -268,7 +268,7 @@ impl NoteContext {
         })
     }
 
-    fn tags(&self, note: &Note) -> Option<Cow<[u8]>> {
+    fn tags(&self, note: &Note) -> Option<Cow<'_, [u8]>> {
         self.with_tags
             .then(|| Cow::from(note.tags.join(" ").into_bytes()))
     }

View file

@@ -511,7 +511,7 @@ impl NoteContext<'_> {
     }
 }
 
 impl Note {
-    fn first_field_stripped(&self) -> Cow<str> {
+    fn first_field_stripped(&self) -> Cow<'_, str> {
         strip_html_preserving_media_filenames(&self.fields()[0])
     }
 }
@@ -623,7 +623,7 @@ impl ForeignNote {
             .all(|(opt, field)| opt.as_ref().map(|s| s == field).unwrap_or(true))
     }
 
-    fn first_field_stripped(&self) -> Option<Cow<str>> {
+    fn first_field_stripped(&self) -> Option<Cow<'_, str>> {
         self.fields
             .first()
             .and_then(|s| s.as_ref())

View file

@@ -48,7 +48,7 @@ pub struct ExtractedLatex {
 pub(crate) fn extract_latex_expanding_clozes(
     text: &str,
     svg: bool,
-) -> (Cow<str>, Vec<ExtractedLatex>) {
+) -> (Cow<'_, str>, Vec<ExtractedLatex>) {
     if text.contains("{{c") {
         let expanded = expand_clozes_to_reveal_latex(text);
         let (text, extracts) = extract_latex(&expanded, svg);
@@ -60,7 +60,7 @@ pub(crate) fn extract_latex_expanding_clozes(
 
 /// Extract LaTeX from the provided text.
 /// Expects cloze deletions to already be expanded.
-pub fn extract_latex(text: &str, svg: bool) -> (Cow<str>, Vec<ExtractedLatex>) {
+pub fn extract_latex(text: &str, svg: bool) -> (Cow<'_, str>, Vec<ExtractedLatex>) {
     let mut extracted = vec![];
 
     let new_text = LATEX.replace_all(text, |caps: &Captures| {
@@ -84,7 +84,7 @@ pub fn extract_latex(text: &str, svg: bool) -> (Cow<str>, Vec<ExtractedLatex>) {
     (new_text, extracted)
 }
 
-fn strip_html_for_latex(html: &str) -> Cow<str> {
+fn strip_html_for_latex(html: &str) -> Cow<'_, str> {
     let mut out: Cow<str> = html.into();
     if let Cow::Owned(o) = LATEX_NEWLINES.replace_all(html, "\n") {
         out = o.into();

View file

@@ -91,7 +91,7 @@ fn nonbreaking_space(char: char) -> bool {
 /// - Any problem characters are removed.
 /// - Windows device names like CON and PRN have '_' appended
 /// - The filename is limited to 120 bytes.
-pub(crate) fn normalize_filename(fname: &str) -> Cow<str> {
+pub(crate) fn normalize_filename(fname: &str) -> Cow<'_, str> {
     let mut output = Cow::Borrowed(fname);
 
     if !is_nfc(output.as_ref()) {
@@ -102,7 +102,7 @@ pub(crate) fn normalize_filename(fname: &str) -> Cow<str> {
 }
 
 /// See normalize_filename(). This function expects NFC-normalized input.
-pub(crate) fn normalize_nfc_filename(mut fname: Cow<str>) -> Cow<str> {
+pub(crate) fn normalize_nfc_filename(mut fname: Cow<'_, str>) -> Cow<'_, str> {
     if fname.contains(disallowed_char) {
         fname = fname.replace(disallowed_char, "").into()
     }
@@ -137,7 +137,7 @@ pub(crate) fn normalize_nfc_filename(mut fname: Cow<str>) -> Cow<str> {
 /// but can be accessed as NFC. On these devices, if the filename
 /// is otherwise valid, the filename is returned as NFC.
 #[allow(clippy::collapsible_else_if)]
-pub(crate) fn filename_if_normalized(fname: &str) -> Option<Cow<str>> {
+pub(crate) fn filename_if_normalized(fname: &str) -> Option<Cow<'_, str>> {
     if cfg!(target_vendor = "apple") {
         if !is_nfc(fname) {
             let as_nfc = fname.chars().nfc().collect::<String>();
@@ -208,7 +208,7 @@ pub(crate) fn add_hash_suffix_to_file_stem(fname: &str, hash: &Sha1Hash) -> Stri
 }
 
 /// If filename is longer than max_bytes, truncate it.
-fn truncate_filename(fname: &str, max_bytes: usize) -> Cow<str> {
+fn truncate_filename(fname: &str, max_bytes: usize) -> Cow<'_, str> {
     if fname.len() <= max_bytes {
         return Cow::Borrowed(fname);
     }

View file

@@ -87,7 +87,7 @@ impl TryFrom<anki_proto::notes::AddNoteRequest> for AddNoteRequest {
 }
 
 impl Collection {
-    pub fn add_note(&mut self, note: &mut Note, did: DeckId) -> Result<OpOutput<()>> {
+    pub fn add_note(&mut self, note: &mut Note, did: DeckId) -> Result<OpOutput<usize>> {
         self.transact(Op::AddNote, |col| col.add_note_inner(note, did))
     }
 
@@ -372,7 +372,7 @@ impl Collection {
         Ok(())
     }
 
-    pub(crate) fn add_note_inner(&mut self, note: &mut Note, did: DeckId) -> Result<()> {
+    pub(crate) fn add_note_inner(&mut self, note: &mut Note, did: DeckId) -> Result<usize> {
         let nt = self
             .get_notetype(note.notetype_id)?
             .or_invalid("missing note type")?;
@@ -383,10 +383,11 @@ impl Collection {
         note.prepare_for_update(ctx.notetype, normalize_text)?;
         note.set_modified(ctx.usn);
         self.add_note_only_undoable(note)?;
-        self.generate_cards_for_new_note(&ctx, note, did)?;
+        let count = self.generate_cards_for_new_note(&ctx, note, did)?;
         self.set_last_deck_for_notetype(note.notetype_id, did)?;
         self.set_last_notetype_for_deck(did, note.notetype_id)?;
-        self.set_current_notetype_id(note.notetype_id)
+        self.set_current_notetype_id(note.notetype_id)?;
+        Ok(count)
     }
 
     pub fn update_note(&mut self, note: &mut Note) -> Result<OpOutput<()>> {

View file

@@ -215,7 +215,7 @@ impl Collection {
         ctx: &CardGenContext<impl Deref<Target = Notetype>>,
         note: &Note,
         target_deck_id: DeckId,
-    ) -> Result<()> {
+    ) -> Result<usize> {
         self.generate_cards_for_note(
             ctx,
             note,
@@ -231,7 +231,8 @@ impl Collection {
         note: &Note,
     ) -> Result<()> {
         let existing = self.storage.existing_cards_for_note(note.id)?;
-        self.generate_cards_for_note(ctx, note, &existing, ctx.last_deck, &mut Default::default())
+        self.generate_cards_for_note(ctx, note, &existing, ctx.last_deck, &mut Default::default())?;
+        Ok(())
     }
 
     fn generate_cards_for_note(
@@ -241,12 +242,13 @@ impl Collection {
         existing: &[AlreadyGeneratedCardInfo],
         target_deck_id: Option<DeckId>,
         cache: &mut CardGenCache,
-    ) -> Result<()> {
+    ) -> Result<usize> {
         let cards = ctx.new_cards_required(note, existing, true);
         if cards.is_empty() {
-            return Ok(());
+            return Ok(0);
        }
-        self.add_generated_cards(note.id, &cards, target_deck_id, cache)
+        self.add_generated_cards(note.id, &cards, target_deck_id, cache)?;
+        Ok(cards.len())
     }
 
     pub(crate) fn generate_cards_for_notetype(

View file
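Note: the two files above thread a new return value through the card-generation path: `generate_cards_for_note` now reports how many cards it created, and `add_note` surfaces that count as `OpOutput<usize>`. A hedged sketch, not Anki code, of what a caller might do with it; `add_and_report` is a hypothetical helper, and it assumes `OpOutput` exposes its payload as `.output`:

    fn add_and_report(col: &mut Collection, note: &mut Note, did: DeckId) -> Result<()> {
        let out = col.add_note(note, did)?;
        let card_count: usize = out.output; // cards generated for the new note
        if card_count == 0 {
            // A note whose templates produced nothing is worth flagging,
            // which is presumably why the count is now surfaced.
            eprintln!("note added, but no cards were generated");
        } else {
            println!("note added with {card_count} card(s)");
        }
        Ok(())
    }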

@@ -25,7 +25,7 @@ pub struct RenderCardOutput {
 
 impl RenderCardOutput {
     /// The question text. This is only valid to call when partial_render=false.
-    pub fn question(&self) -> Cow<str> {
+    pub fn question(&self) -> Cow<'_, str> {
         match self.qnodes.as_slice() {
             [RenderedNode::Text { text }] => text.into(),
             _ => "not fully rendered".into(),
@@ -33,7 +33,7 @@ impl RenderCardOutput {
     }
 
     /// The answer text. This is only valid to call when partial_render=false.
-    pub fn answer(&self) -> Cow<str> {
+    pub fn answer(&self) -> Cow<'_, str> {
         match self.anodes.as_slice() {
             [RenderedNode::Text { text }] => text.into(),
             _ => "not fully rendered".into(),

View file

@@ -97,7 +97,7 @@ fn create_review_priority_fn(
 
         // Interval-based ordering
         IntervalsAscending => wrap!(|c, _w| c.interval as i32),
-        IntervalsDescending => wrap!(|c, _w| -(c.interval as i32)),
+        IntervalsDescending => wrap!(|c, _w| (c.interval as i32).saturating_neg()),
 
         // Retrievability-based ordering
         RetrievabilityAscending => {
            wrap!(move |c, w| (c.retrievability(w) * 1000.0) as i32)

View file
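Note: the `IntervalsDescending` change above swaps unary negation for `saturating_neg()`. Plain `-x` on an `i32` overflows when `x == i32::MIN` (a panic in debug builds, a silent wrap back to `i32::MIN` in release builds), and `c.interval` appears to be cast from an unsigned value, so that case is reachable for pathological intervals. A standalone illustration:

    fn main() {
        let x = i32::MIN;
        // let y = -x;                 // overflow: panics in debug, wraps in release
        let y = x.saturating_neg();    // clamps to i32::MAX instead
        assert_eq!(y, i32::MAX);

        // Ordinary values behave exactly like unary minus:
        assert_eq!(100_i32.saturating_neg(), -100);
    }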

@@ -61,28 +61,26 @@ impl QueueBuilder {
     }
 
     fn gather_new_cards(&mut self, col: &mut Collection) -> Result<()> {
+        let salt = Self::knuth_salt(self.context.timing.days_elapsed);
         match self.context.sort_options.new_gather_priority {
             NewCardGatherPriority::Deck => {
                 self.gather_new_cards_by_deck(col, NewCardSorting::LowestPosition)
             }
-            NewCardGatherPriority::DeckThenRandomNotes => self.gather_new_cards_by_deck(
-                col,
-                NewCardSorting::RandomNotes(self.context.timing.days_elapsed),
-            ),
+            NewCardGatherPriority::DeckThenRandomNotes => {
+                self.gather_new_cards_by_deck(col, NewCardSorting::RandomNotes(salt))
+            }
             NewCardGatherPriority::LowestPosition => {
                 self.gather_new_cards_sorted(col, NewCardSorting::LowestPosition)
             }
             NewCardGatherPriority::HighestPosition => {
                 self.gather_new_cards_sorted(col, NewCardSorting::HighestPosition)
             }
-            NewCardGatherPriority::RandomNotes => self.gather_new_cards_sorted(
-                col,
-                NewCardSorting::RandomNotes(self.context.timing.days_elapsed),
-            ),
-            NewCardGatherPriority::RandomCards => self.gather_new_cards_sorted(
-                col,
-                NewCardSorting::RandomCards(self.context.timing.days_elapsed),
-            ),
+            NewCardGatherPriority::RandomNotes => {
+                self.gather_new_cards_sorted(col, NewCardSorting::RandomNotes(salt))
+            }
+            NewCardGatherPriority::RandomCards => {
+                self.gather_new_cards_sorted(col, NewCardSorting::RandomCards(salt))
+            }
         }
     }
@@ -169,4 +167,10 @@ impl QueueBuilder {
             true
         }
     }
+
+    // Generates a salt for use with fnvhash. Useful to increase randomness
+    // when the base salt is a small integer.
+    fn knuth_salt(base_salt: u32) -> u32 {
+        base_salt.wrapping_mul(2654435761)
+    }
 }

View file
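Note: `knuth_salt` above is the classic multiplicative hashing step: 2654435761 (0x9E3779B1) is Knuth's constant, close to 2^32 divided by the golden ratio, so multiplying by it modulo 2^32 spreads small, consecutive inputs like `days_elapsed` across the whole `u32` range before they seed the FNV-based shuffle. A quick standalone demonstration, not Anki code:

    fn knuth_salt(base_salt: u32) -> u32 {
        base_salt.wrapping_mul(2654435761) // 0x9E3779B1, ~2^32 / golden ratio
    }

    fn main() {
        // Consecutive day counters differ in only a bit or two, but the
        // salted values land far apart across the u32 range:
        for day in 0..4u32 {
            println!("day {day}: {:#010x}", knuth_salt(day));
        }
        // day 0: 0x00000000
        // day 1: 0x9e3779b1
        // day 2: 0x3c6ef362
        // day 3: 0xdaa66d13
    }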

@@ -174,7 +174,7 @@ impl LoadBalancer {
         &self,
         note_id: Option<NoteId>,
         deckconfig_id: DeckConfigId,
-    ) -> LoadBalancerContext {
+    ) -> LoadBalancerContext<'_> {
         LoadBalancerContext {
             load_balancer: self,
             note_id,

View file

@@ -226,7 +226,7 @@ impl Collection {
         &mut self,
         search: impl TryIntoSearch,
         mode: SortMode,
-    ) -> Result<CardTableGuard> {
+    ) -> Result<CardTableGuard<'_>> {
         let top_node = search.try_into_search()?;
         let writer = SqlWriter::new(self, ReturnItemType::Cards);
         let want_order = mode != SortMode::NoOrder;
@@ -299,7 +299,7 @@ impl Collection {
     pub(crate) fn search_notes_into_table(
         &mut self,
         search: impl TryIntoSearch,
-    ) -> Result<NoteTableGuard> {
+    ) -> Result<NoteTableGuard<'_>> {
         let top_node = search.try_into_search()?;
         let writer = SqlWriter::new(self, ReturnItemType::Notes);
         let mode = SortMode::NoOrder;
@@ -320,7 +320,7 @@ impl Collection {
 
     /// Place the ids of cards with notes in 'search_nids' into 'search_cids'.
     /// Returns number of added cards.
-    pub(crate) fn search_cards_of_notes_into_table(&mut self) -> Result<CardTableGuard> {
+    pub(crate) fn search_cards_of_notes_into_table(&mut self) -> Result<CardTableGuard<'_>> {
         self.storage.setup_searched_cards_table()?;
         let cards = self.storage.search_cards_of_notes_into_table()?;
         Ok(CardTableGuard { cards, col: self })

View file

@@ -158,7 +158,7 @@ pub fn parse(input: &str) -> Result<Vec<Node>> {
 
 /// Zero or more nodes inside brackets, eg 'one OR two -three'.
 /// Empty vec must be handled by caller.
-fn group_inner(input: &str) -> IResult<Vec<Node>> {
+fn group_inner(input: &str) -> IResult<'_, Vec<Node>> {
     let mut remaining = input;
     let mut nodes = vec![];
 
@@ -203,16 +203,16 @@ fn group_inner(input: &str) -> IResult<Vec<Node>> {
     Ok((remaining, nodes))
 }
 
-fn whitespace0(s: &str) -> IResult<Vec<char>> {
+fn whitespace0(s: &str) -> IResult<'_, Vec<char>> {
     many0(one_of(" \u{3000}")).parse(s)
 }
 
 /// Optional leading space, then a (negated) group or text
-fn node(s: &str) -> IResult<Node> {
+fn node(s: &str) -> IResult<'_, Node> {
     preceded(whitespace0, alt((negated_node, group, text))).parse(s)
 }
 
-fn negated_node(s: &str) -> IResult<Node> {
+fn negated_node(s: &str) -> IResult<'_, Node> {
     map(preceded(char('-'), alt((group, text))), |node| {
         Node::Not(Box::new(node))
     })
@@ -220,7 +220,7 @@ fn negated_node(s: &str) -> IResult<Node> {
 }
 
 /// One or more nodes surrounded by brackets, eg (one OR two)
-fn group(s: &str) -> IResult<Node> {
+fn group(s: &str) -> IResult<'_, Node> {
     let (opened, _) = char('(')(s)?;
     let (tail, inner) = group_inner(opened)?;
     if let Some(remaining) = tail.strip_prefix(')') {
@@ -235,18 +235,18 @@ fn group(s: &str) -> IResult<Node> {
 }
 
 /// Either quoted or unquoted text
-fn text(s: &str) -> IResult<Node> {
+fn text(s: &str) -> IResult<'_, Node> {
     alt((quoted_term, partially_quoted_term, unquoted_term)).parse(s)
 }
 
 /// Quoted text, including the outer double quotes.
-fn quoted_term(s: &str) -> IResult<Node> {
+fn quoted_term(s: &str) -> IResult<'_, Node> {
     let (remaining, term) = quoted_term_str(s)?;
     Ok((remaining, Node::Search(search_node_for_text(term)?)))
 }
 
 /// eg deck:"foo bar" - quotes must come after the :
-fn partially_quoted_term(s: &str) -> IResult<Node> {
+fn partially_quoted_term(s: &str) -> IResult<'_, Node> {
     let (remaining, (key, val)) = separated_pair(
         escaped(is_not("\"(): \u{3000}\\"), '\\', none_of(" \u{3000}")),
         char(':'),
@@ -260,7 +260,7 @@ fn partially_quoted_term(s: &str) -> IResult<Node> {
 }
 
 /// Unquoted text, terminated by whitespace or unescaped ", ( or )
-fn unquoted_term(s: &str) -> IResult<Node> {
+fn unquoted_term(s: &str) -> IResult<'_, Node> {
     match escaped(is_not("\"() \u{3000}\\"), '\\', none_of(" \u{3000}"))(s) {
         Ok((tail, term)) => {
             if term.is_empty() {
@@ -297,7 +297,7 @@ fn unquoted_term(s: &str) -> IResult<Node> {
 }
 
 /// Non-empty string delimited by unescaped double quotes.
-fn quoted_term_str(s: &str) -> IResult<&str> {
+fn quoted_term_str(s: &str) -> IResult<'_, &str> {
     let (opened, _) = char('"')(s)?;
     if let Ok((tail, inner)) =
         escaped::<_, ParseError, _, _>(is_not(r#""\"#), '\\', anychar).parse(opened)
@@ -321,7 +321,7 @@ fn quoted_term_str(s: &str) -> IResult<&str> {
 
 /// Determine if text is a qualified search, and handle escaped chars.
 /// Expect well-formed input: unempty and no trailing \.
-fn search_node_for_text(s: &str) -> ParseResult<SearchNode> {
+fn search_node_for_text(s: &str) -> ParseResult<'_, SearchNode> {
     // leading : is only possible error for well-formed input
     let (tail, head) = verify(escaped(is_not(r":\"), '\\', anychar), |t: &str| {
         !t.is_empty()
@@ -369,7 +369,7 @@ fn search_node_for_text_with_argument<'a>(
     })
 }
 
-fn parse_tag(s: &str) -> ParseResult<SearchNode> {
+fn parse_tag(s: &str) -> ParseResult<'_, SearchNode> {
     Ok(if let Some(re) = s.strip_prefix("re:") {
         SearchNode::Tag {
             tag: unescape_quotes(re),
@@ -383,7 +383,7 @@ fn parse_tag(s: &str) -> ParseResult<SearchNode> {
     })
 }
 
-fn parse_template(s: &str) -> ParseResult<SearchNode> {
+fn parse_template(s: &str) -> ParseResult<'_, SearchNode> {
     Ok(SearchNode::CardTemplate(match s.parse::<u16>() {
         Ok(n) => TemplateKind::Ordinal(n.max(1) - 1),
         Err(_) => TemplateKind::Name(unescape(s)?),
@@ -391,7 +391,7 @@ fn parse_template(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// flag:0-7
-fn parse_flag(s: &str) -> ParseResult<SearchNode> {
+fn parse_flag(s: &str) -> ParseResult<'_, SearchNode> {
     if let Ok(flag) = s.parse::<u8>() {
         if flag > 7 {
             Err(parse_failure(s, FailKind::InvalidFlag))
@@ -404,7 +404,7 @@ fn parse_flag(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg resched:3
-fn parse_resched(s: &str) -> ParseResult<SearchNode> {
+fn parse_resched(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "resched:").map(|days| SearchNode::Rated {
         days,
         ease: RatingKind::ManualReschedule,
@@ -412,7 +412,7 @@ fn parse_resched(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg prop:ivl>3, prop:ease!=2.5
-fn parse_prop(prop_clause: &str) -> ParseResult<SearchNode> {
+fn parse_prop(prop_clause: &str) -> ParseResult<'_, SearchNode> {
     let (tail, prop) = alt((
         tag("ivl"),
         tag("due"),
@@ -580,23 +580,23 @@ fn parse_prop_rated<'a>(num: &str, context: &'a str) -> ParseResult<'a, Property
 }
 
 /// eg added:1
-fn parse_added(s: &str) -> ParseResult<SearchNode> {
+fn parse_added(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "added:").map(|n| SearchNode::AddedInDays(n.max(1)))
 }
 
 /// eg edited:1
-fn parse_edited(s: &str) -> ParseResult<SearchNode> {
+fn parse_edited(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "edited:").map(|n| SearchNode::EditedInDays(n.max(1)))
 }
 
 /// eg introduced:1
-fn parse_introduced(s: &str) -> ParseResult<SearchNode> {
+fn parse_introduced(s: &str) -> ParseResult<'_, SearchNode> {
     parse_u32(s, "introduced:").map(|n| SearchNode::IntroducedInDays(n.max(1)))
 }
 
 /// eg rated:3 or rated:10:2
 /// second arg must be between 1-4
-fn parse_rated(s: &str) -> ParseResult<SearchNode> {
+fn parse_rated(s: &str) -> ParseResult<'_, SearchNode> {
     let mut it = s.splitn(2, ':');
     let days = parse_u32(it.next().unwrap(), "rated:")?.max(1);
     let button = parse_answer_button(it.next(), s)?;
@@ -604,7 +604,7 @@ fn parse_rated(s: &str) -> ParseResult<SearchNode> {
 }
 
 /// eg is:due
-fn parse_state(s: &str) -> ParseResult<SearchNode> {
+fn parse_state(s: &str) -> ParseResult<'_, SearchNode> {
     use StateKind::*;
     Ok(SearchNode::State(match s {
         "new" => New,
@@ -624,7 +624,7 @@ fn parse_state(s: &str) -> ParseResult<SearchNode> {
     }))
 }
 
-fn parse_mid(s: &str) -> ParseResult<SearchNode> {
+fn parse_mid(s: &str) -> ParseResult<'_, SearchNode> {
     parse_i64(s, "mid:").map(|n| SearchNode::NotetypeId(n.into()))
 }
 
@@ -646,7 +646,7 @@ fn check_id_list<'a>(s: &'a str, context: &str) -> ParseResult<'a, &'a str> {
 }
 
 /// eg dupe:1231,hello
-fn parse_dupe(s: &str) -> ParseResult<SearchNode> {
+fn parse_dupe(s: &str) -> ParseResult<'_, SearchNode> {
     let mut it = s.splitn(2, ',');
     let ntid = parse_i64(it.next().unwrap(), s)?;
     if let Some(text) = it.next() {
@@ -700,7 +700,7 @@ fn unescape_quotes_and_backslashes(s: &str) -> String {
 }
 
 /// Unescape chars with special meaning to the parser.
-fn unescape(txt: &str) -> ParseResult<String> {
+fn unescape(txt: &str) -> ParseResult<'_, String> {
     if let Some(seq) = invalid_escape_sequence(txt) {
         Err(parse_failure(
             txt,

View file

@@ -403,7 +403,9 @@ impl super::SqliteStorage {
         let last_revlog_info = get_last_revlog_info(&revlog);
         for (card_id, last_revlog_info) in last_revlog_info {
             let card = self.get_card(card_id)?;
-            if let Some(mut card) = card {
+            if last_revlog_info.last_reviewed_at.is_none() {
+                continue;
+            } else if let Some(mut card) = card {
                 if card.ctype != CardType::New && card.last_review_time.is_none() {
                     card.last_review_time = last_revlog_info.last_reviewed_at;
                     self.update_card(&card)?;

View file

@@ -155,7 +155,7 @@ fn invalid_char_for_tag(c: char) -> bool {
     c.is_ascii_control() || is_tag_separator(c)
 }
 
-fn normalized_tag_name_component(comp: &str) -> Cow<str> {
+fn normalized_tag_name_component(comp: &str) -> Cow<'_, str> {
     let mut out = normalize_to_nfc(comp);
     if out.contains(invalid_char_for_tag) {
         out = out.replace(invalid_char_for_tag, "").into();
@@ -170,7 +170,7 @@ fn normalized_tag_name_component(comp: &str) -> Cow<str> {
     }
 }
 
-pub(super) fn normalize_tag_name(name: &str) -> Result<Cow<str>> {
+pub(super) fn normalize_tag_name(name: &str) -> Result<Cow<'_, str>> {
     let normalized_name: Cow<str> = if name
         .split("::")
         .any(|comp| matches!(normalized_tag_name_component(comp), Cow::Owned(_)))

View file

@@ -121,7 +121,7 @@ pub enum Token<'a> {
     CloseConditional(&'a str),
 }
 
-fn comment_token(s: &str) -> nom::IResult<&str, Token> {
+fn comment_token(s: &str) -> nom::IResult<&str, Token<'_>> {
     map(
         delimited(
             tag(COMMENT_START),
@@ -151,7 +151,7 @@ fn tokens(mut template: &str) -> impl Iterator<Item = TemplateResult<Token<'_>>>
 }
 
 /// classify handle based on leading character
-fn classify_handle(s: &str) -> Token {
+fn classify_handle(s: &str) -> Token<'_> {
     let start = s.trim_start_matches('{').trim();
     if start.len() < 2 {
         return Token::Replacement(start);

View file

@@ -117,7 +117,7 @@ fn captured_sound(caps: &Captures) -> bool {
     caps.get(2).unwrap().as_str().starts_with("sound:")
 }
 
-fn kana_filter(text: &str) -> Cow<str> {
+fn kana_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {
@@ -130,7 +130,7 @@ fn kana_filter(text: &str) -> Cow<str> {
         .into()
 }
 
-fn kanji_filter(text: &str) -> Cow<str> {
+fn kanji_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {
@@ -143,7 +143,7 @@ fn kanji_filter(text: &str) -> Cow<str> {
         .into()
 }
 
-fn furigana_filter(text: &str) -> Cow<str> {
+fn furigana_filter(text: &str) -> Cow<'_, str> {
     FURIGANA
         .replace_all(&text.replace("&nbsp;", " "), |caps: &Captures| {
             if captured_sound(caps) {

View file

@@ -215,8 +215,8 @@ pub fn is_html(text: impl AsRef<str>) -> bool {
     HTML.is_match(text.as_ref())
 }
 
-pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<str> {
-    let (html_stripper, sound_rep): (fn(&str) -> Cow<str>, _) = if preserve_media_filenames {
+pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<'_, str> {
+    let (html_stripper, sound_rep): (fn(&str) -> Cow<'_, str>, _) = if preserve_media_filenames {
         (strip_html_preserving_media_filenames, "$1")
     } else {
         (strip_html, "")
@@ -229,15 +229,15 @@ pub fn html_to_text_line(html: &str, preserve_media_filenames: bool) -> Cow<str>
         .trim()
 }
 
-pub fn strip_html(html: &str) -> Cow<str> {
+pub fn strip_html(html: &str) -> Cow<'_, str> {
     strip_html_preserving_entities(html).map_cow(decode_entities)
 }
 
-pub fn strip_html_preserving_entities(html: &str) -> Cow<str> {
+pub fn strip_html_preserving_entities(html: &str) -> Cow<'_, str> {
     HTML.replace_all(html, "")
 }
 
-pub fn decode_entities(html: &str) -> Cow<str> {
+pub fn decode_entities(html: &str) -> Cow<'_, str> {
     if html.contains('&') {
         match htmlescape::decode_html(html) {
             Ok(text) => text.replace('\u{a0}', " ").into(),
@@ -249,7 +249,7 @@ pub fn decode_entities(html: &str) -> Cow<str> {
     }
 }
 
-pub(crate) fn newlines_to_spaces(text: &str) -> Cow<str> {
+pub(crate) fn newlines_to_spaces(text: &str) -> Cow<'_, str> {
     if text.contains('\n') {
         text.replace('\n', " ").into()
     } else {
@@ -257,7 +257,7 @@ pub(crate) fn newlines_to_spaces(text: &str) -> Cow<str> {
     }
 }
 
-pub fn strip_html_for_tts(html: &str) -> Cow<str> {
+pub fn strip_html_for_tts(html: &str) -> Cow<'_, str> {
     HTML_LINEBREAK_TAGS
         .replace_all(html, " ")
         .map_cow(strip_html)
@@ -282,7 +282,7 @@ pub(crate) struct MediaRef<'a> {
     pub fname_decoded: Cow<'a, str>,
 }
 
-pub(crate) fn extract_media_refs(text: &str) -> Vec<MediaRef> {
+pub(crate) fn extract_media_refs(text: &str) -> Vec<MediaRef<'_>> {
     let mut out = vec![];
 
     for caps in HTML_MEDIA_TAGS.captures_iter(text) {
@@ -359,11 +359,11 @@ pub(crate) fn extract_underscored_references(text: &str) -> Vec<&str> {
 /// Returns the first matching group as a str. This is intended for regexes
 /// where exactly one group matches, and will panic for matches without matching
 /// groups.
-fn extract_match(caps: Captures) -> &str {
+fn extract_match(caps: Captures<'_>) -> &str {
     caps.iter().skip(1).find_map(|g| g).unwrap().as_str()
 }
 
-pub fn strip_html_preserving_media_filenames(html: &str) -> Cow<str> {
+pub fn strip_html_preserving_media_filenames(html: &str) -> Cow<'_, str> {
     HTML_MEDIA_TAGS
         .replace_all(html, r" ${1}${2}${3} ")
         .map_cow(strip_html)
@@ -385,7 +385,7 @@ pub(crate) fn sanitize_html_no_images(html: &str) -> String {
         .to_string()
 }
 
-pub(crate) fn normalize_to_nfc(s: &str) -> Cow<str> {
+pub(crate) fn normalize_to_nfc(s: &str) -> Cow<'_, str> {
     match is_nfc(s) {
         false => s.chars().nfc().collect::<String>().into(),
         true => s.into(),
@@ -429,7 +429,7 @@ static EXTRA_NO_COMBINING_REPLACEMENTS: phf::Map<char, &str> = phf::phf_map! {
 };
 
 /// Convert provided string to NFKD form and strip combining characters.
-pub(crate) fn without_combining(s: &str) -> Cow<str> {
+pub(crate) fn without_combining(s: &str) -> Cow<'_, str> {
     // if the string is already normalized
     if matches!(is_nfkd_quick(s.chars()), IsNormalized::Yes) {
         // and no combining characters found, return unchanged
@@ -472,7 +472,7 @@ pub(crate) fn is_glob(txt: &str) -> bool {
 }
 
 /// Convert to a RegEx respecting Anki wildcards.
-pub(crate) fn to_re(txt: &str) -> Cow<str> {
+pub(crate) fn to_re(txt: &str) -> Cow<'_, str> {
     to_custom_re(txt, ".")
 }
 
@@ -492,7 +492,7 @@ pub(crate) fn to_custom_re<'a>(txt: &'a str, wildcard: &str) -> Cow<'a, str> {
 }
 
 /// Convert to SQL respecting Anki wildcards.
-pub(crate) fn to_sql(txt: &str) -> Cow<str> {
+pub(crate) fn to_sql(txt: &str) -> Cow<'_, str> {
     // escape sequences and unescaped special characters which need conversion
     static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\\[\\*]|[*%]").unwrap());
     RE.replace_all(txt, |caps: &Captures| {
@@ -508,7 +508,7 @@ pub(crate) fn to_sql(txt: &str) -> Cow<str> {
 }
 
 /// Unescape everything.
-pub(crate) fn to_text(txt: &str) -> Cow<str> {
+pub(crate) fn to_text(txt: &str) -> Cow<'_, str> {
     static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\\(.)").unwrap());
     RE.replace_all(txt, "$1")
 }
@@ -561,14 +561,14 @@ const FRAGMENT_QUERY_UNION: &AsciiSet = &CONTROLS
     .add(b'#');
 
 /// IRI-encode unescaped local paths in HTML fragment.
-pub(crate) fn encode_iri_paths(unescaped_html: &str) -> Cow<str> {
+pub(crate) fn encode_iri_paths(unescaped_html: &str) -> Cow<'_, str> {
     transform_html_paths(unescaped_html, |fname| {
         utf8_percent_encode(fname, FRAGMENT_QUERY_UNION).into()
     })
 }
 
 /// URI-decode escaped local paths in HTML fragment.
-pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<str> {
+pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<'_, str> {
     transform_html_paths(escaped_html, |fname| {
         percent_decode_str(fname).decode_utf8_lossy()
     })
@@ -577,9 +577,9 @@ pub(crate) fn decode_iri_paths(escaped_html: &str) -> Cow<str> {
 /// Apply a transform to local filename references in tags like IMG.
 /// Required at display time, as Anki unfortunately stores the references
 /// in unencoded form in the database.
-fn transform_html_paths<F>(html: &str, transform: F) -> Cow<str>
+fn transform_html_paths<F>(html: &str, transform: F) -> Cow<'_, str>
 where
-    F: Fn(&str) -> Cow<str>,
+    F: Fn(&str) -> Cow<'_, str>,
 {
     HTML_MEDIA_TAGS.replace_all(html, |caps: &Captures| {
         let fname = caps

View file

@@ -49,7 +49,7 @@ pub fn compare_answer(expected: &str, typed: &str, combining: bool) -> String {
 trait DiffTrait {
     fn get_typed(&self) -> &[char];
     fn get_expected(&self) -> &[char];
-    fn get_expected_original(&self) -> Cow<str>;
+    fn get_expected_original(&self) -> Cow<'_, str>;
 
     fn new(expected: &str, typed: &str) -> Self;
 
@@ -136,7 +136,7 @@ fn render_tokens(tokens: &[DiffToken]) -> String {
 
 /// Prefixes a leading mark character with a non-breaking space to prevent
 /// it from joining the previous token.
-fn isolate_leading_mark(text: &str) -> Cow<str> {
+fn isolate_leading_mark(text: &str) -> Cow<'_, str> {
     if text
         .chars()
         .next()
@@ -161,7 +161,7 @@ impl DiffTrait for Diff {
     fn get_expected(&self) -> &[char] {
         &self.expected
     }
-    fn get_expected_original(&self) -> Cow<str> {
+    fn get_expected_original(&self) -> Cow<'_, str> {
         Cow::Owned(self.get_expected().iter().collect::<String>())
     }
 
@@ -191,7 +191,7 @@ impl DiffTrait for DiffNonCombining {
     fn get_expected(&self) -> &[char] {
         &self.base.expected
     }
-    fn get_expected_original(&self) -> Cow<str> {
+    fn get_expected_original(&self) -> Cow<'_, str> {
         Cow::Borrowed(&self.expected_original)
     }

View file

@@ -9,6 +9,8 @@ set QTWEBENGINE_CHROMIUM_FLAGS=--remote-allow-origins=http://localhost:8080
 set ANKI_API_PORT=40000
 set ANKI_API_HOST=127.0.0.1
 
+@if not defined PYENV set PYENV=out\pyenv
+
 call tools\ninja pylib qt || exit /b 1
-.\out\pyenv\scripts\python tools\run.py %* || exit /b 1
+%PYENV%\Scripts\python tools\run.py %* || exit /b 1
 popd

View file

@@ -1,3 +1,3 @@
 [toolchain]
 # older versions may fail to compile; newer versions may fail the clippy tests
-channel = "1.88.0"
+channel = "1.89.0"

View file

@@ -95,8 +95,8 @@
     "repository": "https://github.com/TooTallNate/node-agent-base",
     "publisher": "Nathan Rajlich",
     "email": "nathan@tootallnate.net",
-    "path": "node_modules/https-proxy-agent/node_modules/agent-base",
-    "licenseFile": "node_modules/https-proxy-agent/node_modules/agent-base/README.md"
+    "path": "node_modules/http-proxy-agent/node_modules/agent-base",
+    "licenseFile": "node_modules/http-proxy-agent/node_modules/agent-base/README.md"
   },
   "asynckit@0.4.0": {
     "licenses": "MIT",
@@ -127,6 +127,14 @@
     "path": "node_modules/browser-process-hrtime",
     "licenseFile": "node_modules/browser-process-hrtime/LICENSE"
   },
+  "call-bind-apply-helpers@1.0.2": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/call-bind-apply-helpers",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/call-bind-apply-helpers",
+    "licenseFile": "node_modules/call-bind-apply-helpers/LICENSE"
+  },
   "codemirror@5.65.18": {
     "licenses": "MIT",
     "repository": "https://github.com/codemirror/CodeMirror",
@@ -436,10 +444,58 @@
     "path": "node_modules/domexception",
     "licenseFile": "node_modules/domexception/LICENSE.txt"
   },
+  "dunder-proto@1.0.1": {
+    "licenses": "MIT",
+    "repository": "https://github.com/es-shims/dunder-proto",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/dunder-proto",
+    "licenseFile": "node_modules/dunder-proto/LICENSE"
+  },
   "empty-npm-package@1.0.0": {
     "licenses": "ISC",
     "path": "node_modules/canvas"
   },
+  "es-define-property@1.0.1": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/es-define-property",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/es-set-tostringtag/node_modules/es-define-property",
+    "licenseFile": "node_modules/es-set-tostringtag/node_modules/es-define-property/LICENSE"
+  },
+  "es-errors@1.3.0": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/es-errors",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/es-errors",
+    "licenseFile": "node_modules/es-errors/LICENSE"
+  },
+  "es-object-atoms@1.0.0": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/es-object-atoms",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/es-object-atoms",
+    "licenseFile": "node_modules/es-object-atoms/LICENSE"
+  },
+  "es-object-atoms@1.1.1": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/es-object-atoms",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/es-set-tostringtag/node_modules/es-object-atoms",
+    "licenseFile": "node_modules/es-set-tostringtag/node_modules/es-object-atoms/LICENSE"
+  },
+  "es-set-tostringtag@2.1.0": {
+    "licenses": "MIT",
+    "repository": "https://github.com/es-shims/es-set-tostringtag",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/es-set-tostringtag",
+    "licenseFile": "node_modules/es-set-tostringtag/LICENSE"
+  },
   "escodegen@2.1.0": {
     "licenses": "BSD-2-Clause",
     "repository": "https://github.com/estools/escodegen",
@@ -474,7 +530,7 @@
     "path": "node_modules/fabric",
     "licenseFile": "node_modules/fabric/LICENSE"
   },
-  "form-data@4.0.1": {
+  "form-data@4.0.4": {
     "licenses": "MIT",
     "repository": "https://github.com/form-data/form-data",
     "publisher": "Felix Geisendörfer",
@@ -482,6 +538,38 @@
     "path": "node_modules/form-data",
     "licenseFile": "node_modules/form-data/License"
   },
+  "function-bind@1.1.2": {
+    "licenses": "MIT",
+    "repository": "https://github.com/Raynos/function-bind",
+    "publisher": "Raynos",
+    "email": "raynos2@gmail.com",
+    "path": "node_modules/function-bind",
+    "licenseFile": "node_modules/function-bind/LICENSE"
+  },
+  "get-intrinsic@1.3.0": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/get-intrinsic",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/es-set-tostringtag/node_modules/get-intrinsic",
+    "licenseFile": "node_modules/es-set-tostringtag/node_modules/get-intrinsic/LICENSE"
+  },
+  "get-proto@1.0.1": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/get-proto",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/get-proto",
+    "licenseFile": "node_modules/get-proto/LICENSE"
+  },
+  "gopd@1.2.0": {
+    "licenses": "MIT",
+    "repository": "https://github.com/ljharb/gopd",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/dunder-proto/node_modules/gopd",
+    "licenseFile": "node_modules/dunder-proto/node_modules/gopd/LICENSE"
+  },
   "hammerjs@2.0.8": {
     "licenses": "MIT",
     "repository": "https://github.com/hammerjs/hammer.js",
@@ -490,6 +578,38 @@
     "path": "node_modules/hammerjs",
     "licenseFile": "node_modules/hammerjs/LICENSE.md"
   },
+  "has-symbols@1.0.3": {
+    "licenses": "MIT",
+    "repository": "https://github.com/inspect-js/has-symbols",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/has-symbols",
+    "licenseFile": "node_modules/has-symbols/LICENSE"
+  },
+  "has-symbols@1.1.0": {
+    "licenses": "MIT",
+    "repository": "https://github.com/inspect-js/has-symbols",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/es-set-tostringtag/node_modules/has-symbols",
+    "licenseFile": "node_modules/es-set-tostringtag/node_modules/has-symbols/LICENSE"
+  },
+  "has-tostringtag@1.0.2": {
+    "licenses": "MIT",
+    "repository": "https://github.com/inspect-js/has-tostringtag",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/has-tostringtag",
+    "licenseFile": "node_modules/has-tostringtag/LICENSE"
+  },
+  "hasown@2.0.2": {
+    "licenses": "MIT",
+    "repository": "https://github.com/inspect-js/hasOwn",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/hasown",
+    "licenseFile": "node_modules/hasown/LICENSE"
+  },
   "html-encoding-sniffer@3.0.0": {
     "licenses": "MIT",
     "repository": "https://github.com/jsdom/html-encoding-sniffer",
@@ -587,6 +707,14 @@
     "path": "node_modules/marked",
     "licenseFile": "node_modules/marked/LICENSE.md"
   },
+  "math-intrinsics@1.1.0": {
+    "licenses": "MIT",
+    "repository": "https://github.com/es-shims/math-intrinsics",
+    "publisher": "Jordan Harband",
+    "email": "ljharb@gmail.com",
+    "path": "node_modules/math-intrinsics",
+    "licenseFile": "node_modules/math-intrinsics/LICENSE"
+  },
   "mathjax@3.2.2": {
     "licenses": "Apache-2.0",
     "repository": "https://github.com/mathjax/MathJax",

View file

@@ -3,9 +3,9 @@ Copyright: Ankitects Pty Ltd and contributors
 License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 -->
 <script lang="ts">
-    import { page } from "$app/stores";
+    import { page } from "$app/state";
 
-    $: message = $page.error!.message;
+    $: message = page.error!.message;
 </script>
 
 {message}

View file

@@ -3,7 +3,7 @@ Copyright: Ankitects Pty Ltd and contributors
 License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 -->
 <script lang="ts">
-    import { page } from "$app/stores";
+    import { page } from "$app/state";
 
     import CardInfo from "../CardInfo.svelte";
     import type { PageData } from "./$types";
@@ -11,7 +11,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
     export let data: PageData;
 
-    const showRevlog = $page.url.searchParams.get("revlog") !== "0";
+    const showRevlog = page.url.searchParams.get("revlog") !== "0";
 
     globalThis.anki ||= {};
     globalThis.anki.updateCard = async (card_id: string): Promise<void> => {

View file

@@ -3,7 +3,7 @@ Copyright: Ankitects Pty Ltd and contributors
 License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 -->
 <script lang="ts">
-    import { page } from "$app/stores";
+    import { page } from "$app/state";
 
     import CardInfo from "../../CardInfo.svelte";
     import type { PageData } from "./$types";
@@ -11,8 +11,8 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
     export let data: PageData;
 
-    const showRevlog = $page.url.searchParams.get("revlog") !== "0";
-    const showCurve = $page.url.searchParams.get("curve") !== "0";
+    const showRevlog = page.url.searchParams.get("revlog") !== "0";
+    const showCurve = page.url.searchParams.get("curve") !== "0";
 
     globalThis.anki ||= {};
     globalThis.anki.updateCardInfos = async (card_id: string): Promise<void> => {

View file

@@ -136,12 +136,13 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
     :global(.container-columns) {
         display: grid;
-        gap: 20px;
+        gap: 0px;
     }
 
     @include bp.with-breakpoint("lg") {
         :global(.container-columns) {
             grid-template-columns: repeat(2, 1fr);
+            gap: 20px;
         }
     }
 }

View file

@@ -59,11 +59,6 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
             sched: HelpItemScheduler.FSRS,
             global: true,
         },
-        computeOptimalRetention: {
-            title: tr.deckConfigComputeOptimalRetention(),
-            help: tr.deckConfigComputeOptimalRetentionTooltip4(),
-            sched: HelpItemScheduler.FSRS,
-        },
         healthCheck: {
             title: tr.deckConfigHealthCheck(),
             help:

View file

@@ -3007,10 +3007,10 @@ __metadata:
   languageName: node
   linkType: hard
 
-"devalue@npm:^5.1.0":
-  version: 5.1.1
-  resolution: "devalue@npm:5.1.1"
-  checksum: 10c0/f6717a856fd54216959abd341cb189e47a9b37d72d8419e055ae77567ff4ed0fb683b1ffb6a71067f645adae5991bffabe6468a3e2385937bff49273e71c1f51
+"devalue@npm:^5.3.2":
+  version: 5.3.2
+  resolution: "devalue@npm:5.3.2"
+  checksum: 10c0/2dab403779233224285afe4b30eaded038df10cb89b8f2c1e41dd855a8e6b634aa24175b87f64df665204bb9a6a6e7758d172682719b9c5cf3cef336ff9fa507
   languageName: node
   linkType: hard