Integrate FSRS into Anki (#2654)
* Pack FSRS data into card.data
* Update FSRS card data when preset or weights change + Show FSRS stats in card stats
* Show a warning when there's a limited review history
* Add some translations; tweak UI
* Fix default requested retention
* Add browser columns, fix calculation of R
* Property searches eg prop:d>0.1
* Integrate FSRS into reviewer
* Warn about long learning steps
* Hide minimum interval when FSRS is on
* Don't apply interval multiplier to FSRS intervals
* Expose memory state to Python
* Don't set memory state on new cards
* Port Jarrett's new tests; add some helpers to make tests more compact
  https://github.com/open-spaced-repetition/fsrs-rs/pull/64
* Fix learning cards not being given memory state
* Require update to v3 scheduler
* Don't exclude single learning step when calculating memory state
* Use relearning step when learning steps unavailable
* Update docstring
* fix single_card_revlog_to_items (#2656)
* no need to check the review_kind for unique_dates
* add email address to CONTRIBUTORS
* fix last first learn & keep early review
* cargo fmt
* cargo clippy --fix
* Add Jarrett to about screen
* Fix fsrs_memory_state being initialized to default in get_card()
* Set initial memory state on graduate
* Update to latest FSRS
* Fix experiment.log being empty
* Fix broken colpkg imports (introduced by "Update FSRS card data when preset or weights change")
* Update memory state during (re)learning; use FSRS for graduating intervals
* Reset memory state when cards are manually rescheduled as new
* Add difficulty graph; hide eases when FSRS enabled
* Add retrievability graph
* Derive memory_state from revlog when it's missing and shouldn't be

---------

Co-authored-by: Jarrett Ye <jarrett.ye@outlook.com>
This commit is contained in:
parent 6f0bf58d49
commit 5004cd332b
77 changed files with 1805 additions and 575 deletions
CONTRIBUTORS

@@ -138,6 +138,7 @@ Monty Evans <montyevans@gmail.com>
 Nil Admirari <https://github.com/nihil-admirari>
 Michael Winkworth <github.com/SteelColossus>
 Mateusz Wojewoda <kawa1.11@o2.pl>
+Jarrett Ye <jarrett.ye@outlook.com>

 ********************

Cargo.lock (generated, 50 lines changed)

@@ -107,7 +107,7 @@ dependencies = [
  "fluent",
  "fluent-bundle",
  "fnv",
- "fsrs-optimizer",
+ "fsrs",
  "futures",
  "hex",
  "htmlescape",
@@ -574,8 +574,8 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"

 [[package]]
 name = "burn"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "burn-core",
  "burn-train",
@@ -583,8 +583,8 @@ dependencies = [

 [[package]]
 name = "burn-autodiff"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "burn-common",
  "burn-tensor",
@@ -595,8 +595,8 @@ dependencies = [

 [[package]]
 name = "burn-common"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "const-random",
  "rand 0.8.5",
@@ -606,8 +606,8 @@ dependencies = [

 [[package]]
 name = "burn-core"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "bincode",
  "burn-autodiff",
@@ -631,8 +631,8 @@ dependencies = [

 [[package]]
 name = "burn-dataset"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "csv 1.2.2",
  "derive-new",
@@ -651,8 +651,8 @@ dependencies = [

 [[package]]
 name = "burn-derive"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "derive-new",
  "proc-macro2",
@@ -662,8 +662,8 @@ dependencies = [

 [[package]]
 name = "burn-ndarray"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "burn-autodiff",
  "burn-common",
@@ -680,8 +680,8 @@ dependencies = [

 [[package]]
 name = "burn-tensor"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "burn-tensor-testgen",
  "derive-new",
@@ -696,8 +696,8 @@ dependencies = [

 [[package]]
 name = "burn-tensor-testgen"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -706,8 +706,8 @@ dependencies = [

 [[package]]
 name = "burn-train"
-version = "0.9.0"
-source = "git+https://github.com/burn-rs/burn.git?rev=36446e8d35694a9158f97e85e44b84544b8c4afb#36446e8d35694a9158f97e85e44b84544b8c4afb"
+version = "0.10.0"
+source = "git+https://github.com/burn-rs/burn.git?rev=e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c#e7a65e24c4e88110f2bf6b3a29ac456a12c2ca0c"
 dependencies = [
  "burn-core",
  "derive-new",
@@ -1365,9 +1365,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"

 [[package]]
 name = "faster-hex"
-version = "0.8.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9042d281a5eec0f2387f8c3ea6c4514e2cf2732c90a85aaf383b761ee3b290d"
+checksum = "239f7bfb930f820ab16a9cd95afc26f88264cf6905c960b340a615384aa3338a"
 dependencies = [
  "serde",
 ]
@@ -1530,9 +1530,9 @@ dependencies = [
 ]

 [[package]]
-name = "fsrs-optimizer"
+name = "fsrs"
 version = "0.1.0"
-source = "git+https://github.com/open-spaced-repetition/fsrs-optimizer-rs?rev=e0b15cce555a94de6fdaa4bf1e096d19704a397d#e0b15cce555a94de6fdaa4bf1e096d19704a397d"
+source = "git+https://github.com/open-spaced-repetition/fsrs-rs.git?rev=bae680bfde996f614741b32ac63a27a1a882a45b#bae680bfde996f614741b32ac63a27a1a882a45b"
 dependencies = [
  "burn",
  "itertools 0.11.0",

Cargo.toml (25 lines changed)

@@ -26,6 +26,23 @@ members = [
 exclude = ["qt/bundle"]
 resolver = "2"

+[workspace.dependencies.csv]
+git = "https://github.com/ankitects/rust-csv.git"
+rev = "1c9d3aab6f79a7d815c69f925a46a4590c115f90"
+
+[workspace.dependencies.percent-encoding-iri]
+git = "https://github.com/ankitects/rust-url.git"
+rev = "bb930b8d089f4d30d7d19c12e54e66191de47b88"
+
+[workspace.dependencies.linkcheck]
+git = "https://github.com/ankitects/linkcheck.git"
+rev = "184b2ca50ed39ca43da13f0b830a463861adb9ca"
+
+[workspace.dependencies.fsrs]
+git = "https://github.com/open-spaced-repetition/fsrs-rs.git"
+rev = "bae680bfde996f614741b32ac63a27a1a882a45b"
+# path = "../../../fsrs-rs"
+
 [workspace.dependencies]
 # local
 anki = { path = "rslib" }
@@ -36,14 +53,6 @@ anki_process = { path = "rslib/process" }
 anki_proto_gen = { path = "rslib/proto_gen" }
 ninja_gen = { "path" = "build/ninja_gen" }

-fsrs-optimizer = { git = "https://github.com/open-spaced-repetition/fsrs-optimizer-rs", rev = "e0b15cce555a94de6fdaa4bf1e096d19704a397d" }
-# fsrs-optimizer.path = "../../../fsrs-optimizer-rs"
-
-# forked
-csv = { git = "https://github.com/ankitects/rust-csv.git", rev = "1c9d3aab6f79a7d815c69f925a46a4590c115f90" }
-percent-encoding-iri = { git = "https://github.com/ankitects/rust-url.git", rev = "bb930b8d089f4d30d7d19c12e54e66191de47b88" }
-linkcheck = { git = "https://github.com/ankitects/linkcheck.git", rev = "184b2ca50ed39ca43da13f0b830a463861adb9ca" }
-
 # pinned
 unicase = "=2.6.0" # any changes could invalidate sqlite indexes

@@ -334,7 +334,7 @@
     },
     {
         "name": "burn",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn",
         "license": "Apache-2.0 OR MIT",
@@ -343,7 +343,7 @@
     },
     {
         "name": "burn-autodiff",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-autodiff",
         "license": "Apache-2.0 OR MIT",
@@ -352,7 +352,7 @@
     },
     {
         "name": "burn-common",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "Dilshod Tadjibaev (@antimora)",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-common",
         "license": "Apache-2.0 OR MIT",
@@ -361,7 +361,7 @@
     },
     {
         "name": "burn-core",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-core",
         "license": "Apache-2.0 OR MIT",
@@ -370,7 +370,7 @@
     },
     {
         "name": "burn-dataset",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-dataset",
         "license": "Apache-2.0 OR MIT",
@@ -379,7 +379,7 @@
     },
     {
         "name": "burn-derive",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-derive",
         "license": "Apache-2.0 OR MIT",
@@ -388,7 +388,7 @@
     },
     {
         "name": "burn-ndarray",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-ndarray",
         "license": "Apache-2.0 OR MIT",
@@ -397,7 +397,7 @@
     },
     {
         "name": "burn-tensor",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-tensor",
         "license": "Apache-2.0 OR MIT",
@@ -406,7 +406,7 @@
     },
     {
         "name": "burn-tensor-testgen",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-tensor-testgen",
         "license": "Apache-2.0 OR MIT",
@@ -415,7 +415,7 @@
     },
     {
         "name": "burn-train",
-        "version": "0.9.0",
+        "version": "0.10.0",
         "authors": "nathanielsimard <nathaniel.simard.42@gmail.com>",
         "repository": "https://github.com/burn-rs/burn/tree/main/burn-train",
         "license": "Apache-2.0 OR MIT",
@@ -847,7 +847,7 @@
     },
     {
         "name": "faster-hex",
-        "version": "0.8.0",
+        "version": "0.8.1",
         "authors": "zhangsoledad <787953403@qq.com>",
         "repository": "https://github.com/NervosFoundation/faster-hex",
         "license": "MIT",
@@ -972,7 +972,7 @@
         "description": "Parser for values from the Forwarded header (RFC 7239)"
     },
     {
-        "name": "fsrs-optimizer",
+        "name": "fsrs",
         "version": "0.1.0",
         "authors": null,
         "repository": null,

@@ -24,6 +24,9 @@ card-stats-review-log-type-filtered = Filtered
 card-stats-review-log-type-manual = Manual
 card-stats-no-card = (No card to display.)
 card-stats-custom-data = Custom Data
+card-stats-fsrs-stability = Stability
+card-stats-fsrs-difficulty = Difficulty
+card-stats-fsrs-retrievability = Retrievability

 ## Window Titles

@@ -307,6 +307,26 @@ deck-config-maximum-answer-secs-above-recommended = Anki can schedule your revie

 deck-config-which-deck = Which deck would you like to display options for?

+## Messages related to the FSRS scheduler
+
+deck-config-updating-cards = Updating cards: { $current_cards_count }/{ $total_cards_count }...
+deck-config-invalid-weights = Weights must be either left blank to use the defaults, or must be 17 comma-separated numbers.
+deck-config-not-enough-history = Insufficient review history to perform this operation.
+deck-config-limited-history =
+    { $count ->
+        [one] Only { $count } review was found.
+       *[other] Only { $count } reviews were found.
+    } The custom weights are likely to be inaccurate, and using the defaults instead is recommended.
+deck-config-compute-weights-search = Search; leave blank for all cards using this preset
+# Numbers that control how aggressively the FSRS algorithm schedules cards
+deck-config-weights = Model weights
+deck-config-compute-optimal-weights = Compute optimal weights
+deck-config-compute-optimal-retention = Compute optimal retention
+deck-config-compute-button = Compute
+deck-config-analyze-button = Analyze
+deck-config-desired-retention = Desired retention
+deck-config-smaller-is-better = Smaller numbers indicate better memory estimates.
+deck-config-steps-too-large-for-fsrs = When FSRS is enabled, interday (re)learning steps are not recommended.

 ## NO NEED TO TRANSLATE. This text is no longer used by Anki, and will be removed in the future.

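The "blank or 17 comma-separated numbers" rule in deck-config-invalid-weights mirrors the backend validation added later in this diff (AnkiError::FsrsWeightsInvalid). A minimal sketch of that rule, assuming a plain comma-separated input string; the helper name is illustrative and not part of Anki's code:

```rust
/// Hypothetical helper: parse a comma-separated FSRS weights string.
/// Returns None for blank input (use the defaults), or an error when the
/// count or format is wrong, mirroring the "blank or 17 numbers" rule.
fn parse_fsrs_weights(input: &str) -> Result<Option<Vec<f32>>, String> {
    let trimmed = input.trim();
    if trimmed.is_empty() {
        return Ok(None); // leave blank to use the default weights
    }
    let weights: Vec<f32> = trimmed
        .split(',')
        .map(|part| part.trim().parse::<f32>().map_err(|e| e.to_string()))
        .collect::<Result<_, _>>()?;
    if weights.len() != 17 {
        return Err(format!("expected 17 weights, got {}", weights.len()));
    }
    Ok(Some(weights))
}

fn main() {
    assert_eq!(parse_fsrs_weights("  ").unwrap(), None);
    assert!(parse_fsrs_weights("1, 2, 3").is_err());
}
```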
@@ -93,13 +93,27 @@ statistics-range-deck = deck
 statistics-range-collection = collection
 statistics-range-search = Search
 statistics-card-ease-title = Card Ease
+statistics-card-difficulty-title = Card Difficulty
+statistics-card-retrievability-title = Card Retrievability
 statistics-card-ease-subtitle = The lower the ease, the more frequently a card will appear.
+statistics-card-difficulty-subtitle = The higher the difficulty, the more frequently a card will appear.
+statistics-retrievability-subtitle = How likely you are to remember.
 # eg "3 cards with 150-170% ease"
 statistics-card-ease-tooltip =
     { $cards ->
         [one] { $cards } card with { $percent } ease
        *[other] { $cards } cards with { $percent } ease
     }
+statistics-card-difficulty-tooltip =
+    { $cards ->
+        [one] { $cards } card with { $percent } difficulty
+       *[other] { $cards } cards with { $percent } difficulty
+    }
+statistics-retrievability-tooltip =
+    { $cards ->
+        [one] { $cards } card with { $percent } retrievability
+       *[other] { $cards } cards with { $percent } retrievability
+    }
 statistics-future-due-title = Future Due
 statistics-future-due-subtitle = The number of reviews due in the future.
 statistics-added-title = Added
@@ -200,6 +214,8 @@ statistics-cards-per-day =
        *[other] { $count } cards/day
     }
 statistics-average-ease = Average ease
+statistics-average-difficulty = Average difficulty
+statistics-average-retrievability = Average retrievability
 statistics-save-pdf = Save PDF
 statistics-saved = Saved.
 statistics-stats = stats

@@ -49,9 +49,15 @@ message Card {
   int64 original_deck_id = 16;
   uint32 flags = 17;
   optional uint32 original_position = 18;
+  optional FsrsMemoryState fsrs_memory_state = 20;
   string custom_data = 19;
 }

+message FsrsMemoryState {
+  float stability = 1;
+  float difficulty = 2;
+}
+
 message UpdateCardsRequest {
   repeated Card cards = 1;
   bool skip_undo_entry = 2;

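Because the new field is optional, prost exposes it as Option<FsrsMemoryState> on the Rust side, and the Python layer checks HasField before reading it (see the pylib hunk below). A minimal sketch of constructing the generated type, assuming the anki_proto::cards module path used elsewhere in this diff:

```rust
use anki_proto::cards::FsrsMemoryState;

fn main() {
    // Stability is measured in days; difficulty is FSRS's difficulty value
    // (roughly a 1-10 scale, judging by the browser column formatting below).
    let state = FsrsMemoryState {
        stability: 14.2,
        difficulty: 5.3,
    };
    // Cards without enough review history simply carry None.
    let on_card: Option<FsrsMemoryState> = Some(state);
    assert!(on_card.is_some());
}
```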
@@ -118,17 +118,6 @@ message Progress {
     uint32 stage_current = 3;
   }

-  message ComputeWeights {
-    uint32 current = 1;
-    uint32 total = 2;
-    uint32 revlog_entries = 3;
-  }
-
-  message ComputeRetention {
-    uint32 current = 1;
-    uint32 total = 2;
-  }
-
   oneof value {
     generic.Empty none = 1;
     sync.MediaSyncProgress media_sync = 2;
@@ -138,11 +127,29 @@ message Progress {
     DatabaseCheck database_check = 6;
     string importing = 7;
     string exporting = 8;
-    ComputeWeights compute_weights = 9;
-    ComputeRetention compute_retention = 10;
+    ComputeWeightsProgress compute_weights = 9;
+    ComputeRetentionProgress compute_retention = 10;
+    ComputeMemoryProgress compute_memory = 11;
   }
 }

+message ComputeWeightsProgress {
+  uint32 current = 1;
+  uint32 total = 2;
+  uint32 fsrs_items = 3;
+}
+
+message ComputeRetentionProgress {
+  uint32 current = 1;
+  uint32 total = 2;
+}
+
+message ComputeMemoryProgress {
+  uint32 current_cards = 1;
+  uint32 total_cards = 2;
+  string label = 3;
+}
+
 message CreateBackupRequest {
   string backup_folder = 1;
   // Create a backup even if the configured interval hasn't elapsed yet.

@@ -63,6 +63,7 @@ message SchedulingState {
   message Learning {
     uint32 remaining_steps = 1;
     uint32 scheduled_secs = 2;
+    optional cards.FsrsMemoryState fsrs_memory_state = 6;
   }
   message Review {
     uint32 scheduled_days = 1;
@@ -70,6 +71,7 @@ message SchedulingState {
     float ease_factor = 3;
     uint32 lapses = 4;
     bool leeched = 5;
+    optional cards.FsrsMemoryState fsrs_memory_state = 6;
   }
   message Relearning {
     Review review = 1;
@@ -330,6 +332,8 @@ message ComputeFsrsWeightsRequest {

 message ComputeFsrsWeightsResponse {
   repeated float weights = 1;
+  // if less than 1000, should warn user
+  uint32 fsrs_items = 2;
 }

 message ComputeOptimalRetentionRequest {
@@ -338,9 +342,18 @@ message ComputeOptimalRetentionRequest {
   uint32 days_to_simulate = 3;
   uint32 max_seconds_of_study_per_day = 4;
   uint32 max_interval = 5;
-  uint32 recall_secs = 6;
-  uint32 forget_secs = 7;
-  uint32 learn_secs = 8;
+  double recall_secs_hard = 6;
+  double recall_secs_good = 7;
+  double recall_secs_easy = 8;
+  uint32 forget_secs = 9;
+  uint32 learn_secs = 10;
+  double first_rating_probability_again = 11;
+  double first_rating_probability_hard = 12;
+  double first_rating_probability_good = 13;
+  double first_rating_probability_easy = 14;
+  double review_rating_probability_hard = 15;
+  double review_rating_probability_good = 16;
+  double review_rating_probability_easy = 17;
 }

 message ComputeOptimalRetentionResponse {
@@ -354,5 +367,5 @@ message EvaluateWeightsRequest {

 message EvaluateWeightsResponse {
   float log_loss = 1;
-  float rmse = 2;
+  float rmse_bins = 2;
 }

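ComputeOptimalRetentionRequest now carries per-button answer times (in seconds) and rating probabilities for the retention simulation. A sketch of filling it in via prost's generated struct, assuming the anki_proto::scheduler module path and treating each probability group as a distribution that sums to 1; the numbers are illustrative, not Anki's defaults:

```rust
use anki_proto::scheduler::ComputeOptimalRetentionRequest;

fn main() {
    let req = ComputeOptimalRetentionRequest {
        days_to_simulate: 365,
        max_seconds_of_study_per_day: 1800,
        max_interval: 36500,
        recall_secs_hard: 14.0,
        recall_secs_good: 10.0,
        recall_secs_easy: 6.0,
        forget_secs: 50,
        learn_secs: 20,
        first_rating_probability_again: 0.15,
        first_rating_probability_hard: 0.2,
        first_rating_probability_good: 0.6,
        first_rating_probability_easy: 0.05,
        review_rating_probability_hard: 0.3,
        review_rating_probability_good: 0.6,
        review_rating_probability_easy: 0.1,
        // Remaining fields (not shown in this hunk) keep their defaults.
        ..Default::default()
    };
    // Assumption: the first-rating probabilities describe a distribution
    // over the four answer buttons, so they should sum to roughly 1.
    let first_sum = req.first_rating_probability_again
        + req.first_rating_probability_hard
        + req.first_rating_probability_good
        + req.first_rating_probability_easy;
    debug_assert!((first_sum - 1.0).abs() < 1e-6);
}
```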
@@ -52,7 +52,10 @@ message CardStatsResponse
   float total_secs = 15;
   string card_type = 16;
   string notetype = 17;
-  string custom_data = 18;
+  optional cards.FsrsMemoryState fsrs_memory_state = 18;
+  // not set if due date/state not available
+  optional float fsrs_retrievability = 19;
+  string custom_data = 20;
 }

 message GraphsRequest {
@@ -70,6 +73,9 @@ message GraphsResponse {
   message Eases {
     map<uint32, uint32> eases = 1;
   }
+  message Retrievability {
+    map<uint32, uint32> retrievability = 1;
+  }
   message FutureDue {
     map<int32, uint32> future_due = 1;
     bool have_backlog = 2;
@@ -141,11 +147,13 @@ message GraphsResponse {
   Hours hours = 3;
   Today today = 4;
   Eases eases = 5;
+  Eases difficulty = 11;
   Intervals intervals = 6;
   FutureDue future_due = 7;
   Added added = 8;
   ReviewCountsAndTimes reviews = 9;
   uint32 rollover_hour = 10;
+  Retrievability retrievability = 12;
 }

 message GraphPreferences {

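Difficulty and retrievability reuse the map<uint32, uint32> histogram shape of the existing ease graph, keyed by a percentage bucket. A small self-contained sketch of building such a histogram from per-card retrievability values; the 5% bucket width is an assumption for illustration, not necessarily what the backend uses:

```rust
use std::collections::HashMap;

/// Group per-card retrievability values (0.0..=1.0) into 5%-wide buckets,
/// producing the map<uint32, uint32> shape used by the graph messages.
fn retrievability_histogram(values: &[f32]) -> HashMap<u32, u32> {
    let mut buckets: HashMap<u32, u32> = HashMap::new();
    for &r in values {
        let percent = (r.clamp(0.0, 1.0) * 100.0).round() as u32;
        let bucket = percent / 5 * 5; // 0, 5, 10, ..., 100
        *buckets.entry(bucket).or_insert(0) += 1;
    }
    buckets
}

fn main() {
    // 0.93 -> bucket 90, 0.97 -> bucket 95, 0.42 -> bucket 40
    let hist = retrievability_histogram(&[0.93, 0.97, 0.42]);
    assert_eq!(hist.get(&90), Some(&1));
    assert_eq!(hist.get(&95), Some(&1));
    assert_eq!(hist.get(&40), Some(&1));
}
```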
@@ -32,6 +32,7 @@ from anki.sound import AVTag
 # types
 CardId = NewType("CardId", int)
 BackendCard = cards_pb2.Card
+FSRSMemoryState = cards_pb2.FsrsMemoryState


 class Card(DeprecatedNamesMixin):
@@ -44,6 +45,7 @@ class Card(DeprecatedNamesMixin):
     odid: anki.decks.DeckId
     queue: CardQueue
     type: CardType
+    fsrs_memory_state: FSRSMemoryState | None

     def __init__(
         self,
@@ -93,6 +95,9 @@ class Card(DeprecatedNamesMixin):
             card.original_position if card.HasField("original_position") else None
         )
         self.custom_data = card.custom_data
+        self.fsrs_memory_state = (
+            card.fsrs_memory_state if card.HasField("fsrs_memory_state") else None
+        )

     def _to_backend_card(self) -> cards_pb2.Card:
         # mtime & usn are set by backend
@@ -114,6 +119,7 @@ class Card(DeprecatedNamesMixin):
             flags=self.flags,
             original_position=self.original_position,
             custom_data=self.custom_data,
+            fsrs_memory_state=self.fsrs_memory_state,
         )

     def flush(self) -> None:

@@ -232,6 +232,7 @@ def show(mw: aqt.AnkiQt) -> QDialog:
             "Edgar Benavent Català",
             "Kieran Black",
             "Mateusz Wojewoda",
+            "Jarrett Ye",
         )
     )

@@ -61,7 +61,6 @@ class DeckBrowser:
         self.web = mw.web
         self.bottom = BottomBar(mw, mw.bottomWeb)
         self.scrollPos = QPoint(0, 0)
-        self._v1_message_dismissed_at = 0
         self._refresh_needed = False

     def show(self) -> None:
@@ -116,7 +115,10 @@ class DeckBrowser:
         elif cmd == "v2upgrade":
             self._confirm_upgrade()
         elif cmd == "v2upgradeinfo":
-            openLink("https://faqs.ankiweb.net/the-anki-2.1-scheduler.html")
+            if self.mw.col.sched_ver() == 1:
+                openLink("https://faqs.ankiweb.net/the-anki-2.1-scheduler.html")
+            else:
+                openLink("https://faqs.ankiweb.net/the-2021-scheduler.html")
         elif cmd == "select":
             set_current_deck(
                 parent=self.mw, deck_id=DeckId(int(arg))
@@ -365,14 +367,16 @@ class DeckBrowser:
     ######################################################################

     def _v1_upgrade_message(self) -> str:
-        if self.mw.col.sched_ver() == 2:
+        if self.mw.col.sched_ver() == 2 and self.mw.col.v3_scheduler():
             return ""

+        update_required = tr.scheduling_update_required().replace("V2", "v3")
+
         return f"""
 <center>
 <div class=callout>
     <div>
-        {tr.scheduling_update_required()}
+        {update_required}
     </div>
     <div>
         <button onclick='pycmd("v2upgrade")'>
@@ -387,8 +391,10 @@ class DeckBrowser:
 """

     def _confirm_upgrade(self) -> None:
-        self.mw.col.mod_schema(check=True)
-        self.mw.col.upgrade_to_v2_scheduler()
+        if self.mw.col.sched_ver() == 1:
+            self.mw.col.mod_schema(check=True)
+            self.mw.col.upgrade_to_v2_scheduler()
+        self.mw.col.set_v3_scheduler(True)

         showInfo(tr.scheduling_update_done())
         self.refresh()

@@ -25,7 +25,7 @@ import aqt
 import aqt.main
 import aqt.operations
 from anki import hooks
-from anki.collection import OpChanges, OpChangesOnly, SearchNode
+from anki.collection import OpChanges, OpChangesOnly, Progress, SearchNode
 from anki.decks import UpdateDeckConfigs
 from anki.scheduler.v3 import SchedulingStatesWithContext, SetSchedulingStatesRequest
 from anki.utils import dev_mode
@@ -33,6 +33,7 @@ from aqt.changenotetype import ChangeNotetypeDialog
 from aqt.deckoptions import DeckOptionsDialog
 from aqt.operations import on_op_finished
 from aqt.operations.deck import update_deck_configs as update_deck_configs_op
+from aqt.progress import ProgressUpdate
 from aqt.qt import *
 from aqt.utils import aqt_data_path

@@ -394,6 +395,16 @@ def update_deck_configs() -> bytes:
     input = UpdateDeckConfigs()
     input.ParseFromString(request.data)

+    def on_progress(progress: Progress, update: ProgressUpdate) -> None:
+        if not progress.HasField("compute_memory"):
+            return
+        val = progress.compute_memory
+        update.max = val.total_cards
+        update.value = val.current_cards
+        update.label = val.label
+        if update.user_wants_abort:
+            update.abort = True
+
     def on_success(changes: OpChanges) -> None:
         if isinstance(window := aqt.mw.app.activeWindow(), DeckOptionsDialog):
             window.reject()
@@ -401,7 +412,7 @@ def update_deck_configs() -> bytes:
     def handle_on_main() -> None:
         update_deck_configs_op(parent=aqt.mw, input=input).success(
             on_success
-        ).run_in_background()
+        ).with_backend_progress(on_progress).run_in_background()

     aqt.mw.taskman.run_on_main(handle_on_main)
     return b""

@@ -153,9 +153,9 @@ class Reviewer:
         hooks.card_did_leech.append(self.onLeech)

     def show(self) -> None:
-        if self.mw.col.sched_ver() == 1:
+        if self.mw.col.sched_ver() == 1 or not self.mw.col.v3_scheduler():
             self.mw.moveToState("deckBrowser")
-            show_warning(tr.scheduling_update_required())
+            show_warning(tr.scheduling_update_required().replace("V2", "v3"))
             return
         self.mw.setStateShortcuts(self._shortcutKeys())  # type: ignore
         self.web.set_bridge_command(self._linkHandler, self)

@@ -120,8 +120,11 @@ class TaskManager(QObject):

         def wrapped_done(fut: Future) -> None:
             self.mw.progress.finish()
+            # allow the event loop to close the window before we proceed
             if on_done:
-                on_done(fut)
+                self.mw.progress.single_shot(
+                    100, lambda: on_done(fut), requires_collection=False
+                )

         self.run_in_background(task, wrapped_done, uses_collection=uses_collection)

@@ -61,7 +61,7 @@ flate2.workspace = true
 fluent.workspace = true
 fluent-bundle.workspace = true
 fnv.workspace = true
-fsrs-optimizer.workspace = true
+fsrs.workspace = true
 futures.workspace = true
 hex.workspace = true
 htmlescape.workspace = true

@@ -40,7 +40,10 @@ impl AnkiError {
             AnkiError::FileIoError { .. } => Kind::IoError,
             AnkiError::MediaCheckRequired => Kind::InvalidInput,
             AnkiError::InvalidId => Kind::InvalidInput,
-            AnkiError::InvalidMethodIndex | AnkiError::InvalidServiceIndex => Kind::InvalidInput,
+            AnkiError::InvalidMethodIndex
+            | AnkiError::InvalidServiceIndex
+            | AnkiError::FsrsWeightsInvalid
+            | AnkiError::FsrsInsufficientData => Kind::InvalidInput,
             #[cfg(windows)]
             AnkiError::WindowsError { .. } => Kind::OsError,
         };

@@ -3,6 +3,7 @@

 use std::sync::Arc;

+use fsrs::FSRS;
 use itertools::Itertools;
 use strum::Display;
 use strum::EnumIter;
@@ -52,6 +53,9 @@ pub enum Column {
     SortField,
     #[strum(serialize = "noteTags")]
     Tags,
+    Stability,
+    Difficulty,
+    Retrievability,
 }

 struct RowContext {
@@ -115,6 +119,15 @@ impl Card {
             None
         }
     }
+
+    pub(crate) fn days_since_last_review(&self, timing: &SchedTimingToday) -> Option<u32> {
+        self.due_time(timing).map(|due| {
+            due.adding_secs(-86_400 * self.interval as i64)
+                .elapsed_secs()
+                .max(0) as u32
+                / 86_400
+        })
+    }
 }

 impl Note {
@@ -144,17 +157,18 @@ impl Column {
             Self::Reps => tr.scheduling_reviews(),
             Self::SortField => tr.browsing_sort_field(),
             Self::Tags => tr.editing_tags(),
+            Self::Stability => tr.card_stats_fsrs_stability(),
+            Self::Difficulty => tr.card_stats_fsrs_difficulty(),
+            Self::Retrievability => tr.card_stats_fsrs_retrievability(),
         }
         .into()
     }

     pub fn notes_mode_label(self, tr: &I18n) -> String {
         match self {
-            Self::CardMod => tr.search_card_modified(),
             Self::Cards => tr.editing_cards(),
             Self::Ease => tr.browsing_average_ease(),
             Self::Interval => tr.browsing_average_interval(),
-            Self::Reps => tr.scheduling_reviews(),
             _ => return self.cards_mode_label(tr),
         }
         .into()
@@ -196,6 +210,9 @@ impl Column {
             | Column::Interval
             | Column::NoteCreation
             | Column::NoteMod
+            | Column::Stability
+            | Column::Difficulty
+            | Column::Retrievability
             | Column::Reps => Sorting::Descending,
         }
     }
@@ -396,6 +413,9 @@ impl RowContext {
             Column::NoteMod => self.note.mtime.date_and_time_string(),
             Column::Tags => self.note.tags.join(" "),
             Column::Notetype => self.notetype.name.to_owned(),
+            Column::Stability => self.fsrs_stability_str(),
+            Column::Difficulty => self.fsrs_difficulty_str(),
+            Column::Retrievability => self.fsrs_retrievability_str(),
             Column::Custom => "".to_string(),
         })
     }
@@ -450,6 +470,36 @@ impl RowContext {
         }
     }

+    fn fsrs_stability_str(&self) -> String {
+        self.cards[0]
+            .fsrs_memory_state
+            .as_ref()
+            .map(|s| time_span(s.stability * 86400.0, &self.tr, false))
+            .unwrap_or_default()
+    }
+
+    fn fsrs_difficulty_str(&self) -> String {
+        self.cards[0]
+            .fsrs_memory_state
+            .as_ref()
+            .map(|s| format!("{:.0}%", (s.difficulty - 1.0) / 9.0 * 100.0))
+            .unwrap_or_default()
+    }
+
+    fn fsrs_retrievability_str(&self) -> String {
+        self.cards[0]
+            .fsrs_memory_state
+            .as_ref()
+            .zip(self.cards[0].days_since_last_review(&self.timing))
+            .map(|(state, days_elapsed)| {
+                let r = FSRS::new(None)
+                    .unwrap()
+                    .current_retrievability((*state).into(), days_elapsed);
+                format!("{:.0}%", r * 100.)
+            })
+            .unwrap_or_default()
+    }
+
     /// Returns the due date of the next due card that is not in a filtered
     /// deck, new, suspended or buried or the empty string if there is no
     /// such card.

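The Retrievability column computes its value on demand from the stored memory state and days_since_last_review, using FSRS with the default weights. A standalone sketch of the same call, reusing the FSRS::new(None) and current_retrievability usage from the hunk above against the fsrs crate revision pinned in this commit; the numbers are illustrative:

```rust
use fsrs::{MemoryState, FSRS};

fn main() {
    // None = use the default model weights, as the browser column does.
    let fsrs = FSRS::new(None).unwrap();
    let state = MemoryState {
        stability: 14.2, // days
        difficulty: 5.3, // FSRS difficulty, roughly a 1-10 scale
    };
    let days_elapsed = 10;
    let r = fsrs.current_retrievability(state, days_elapsed);
    // Same formatting as the Retrievability browser column.
    println!("{:.0}%", r * 100.0);
}
```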
@@ -8,6 +8,7 @@ use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::collections::HashSet;

+use fsrs::MemoryState;
 use num_enum::TryFromPrimitive;
 use serde_repr::Deserialize_repr;
 use serde_repr::Serialize_repr;
@@ -71,7 +72,7 @@ pub enum CardQueueNumber {
     Invalid,
 }

-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct Card {
     pub(crate) id: CardId,
     pub(crate) note_id: NoteId,
@@ -92,11 +93,18 @@ pub struct Card {
     pub(crate) flags: u8,
     /// The position in the new queue before leaving it.
     pub(crate) original_position: Option<u32>,
+    pub(crate) fsrs_memory_state: Option<FsrsMemoryState>,
     /// JSON object or empty; exposed through the reviewer for persisting custom
     /// state
     pub(crate) custom_data: String,
 }

+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct FsrsMemoryState {
+    pub stability: f32,
+    pub difficulty: f32,
+}
+
 impl Default for Card {
     fn default() -> Self {
         Self {
@@ -118,6 +126,7 @@ impl Default for Card {
             original_deck_id: DeckId(0),
             flags: 0,
             original_position: None,
+            fsrs_memory_state: None,
             custom_data: String::new(),
         }
     }
@@ -434,6 +443,24 @@ impl<'a> RemainingStepsAdjuster<'a> {
     }
 }

+impl From<FsrsMemoryState> for MemoryState {
+    fn from(value: FsrsMemoryState) -> Self {
+        MemoryState {
+            stability: value.stability,
+            difficulty: value.difficulty,
+        }
+    }
+}
+
+impl From<MemoryState> for FsrsMemoryState {
+    fn from(value: MemoryState) -> Self {
+        FsrsMemoryState {
+            stability: value.stability,
+            difficulty: value.difficulty,
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use crate::tests::open_test_collection_with_learning_card;

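FsrsMemoryState is Anki's own copy of the stability/difficulty pair, so the fsrs crate type stays out of storage and proto code; the two From impls above make the conversion lossless in both directions. A small sketch of the round trip, which only compiles inside rslib where crate::card::FsrsMemoryState is visible:

```rust
use fsrs::MemoryState;

// Test-style usage inside rslib; `crate::card::FsrsMemoryState` is the
// struct introduced in the hunk above.
fn round_trip(card_state: crate::card::FsrsMemoryState) -> crate::card::FsrsMemoryState {
    // Into the fsrs crate's type for scheduling math...
    let engine_state: MemoryState = card_state.into();
    // ...and back onto the card for storage.
    engine_state.into()
}
```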
@@ -4,6 +4,7 @@ use crate::card::Card;
 use crate::card::CardId;
 use crate::card::CardQueue;
 use crate::card::CardType;
+use crate::card::FsrsMemoryState;
 use crate::collection::Collection;
 use crate::decks::DeckId;
 use crate::error;
@@ -99,6 +100,7 @@ impl TryFrom<anki_proto::cards::Card> for Card {
             original_deck_id: DeckId(c.original_deck_id),
             flags: c.flags as u8,
             original_position: c.original_position,
+            fsrs_memory_state: c.fsrs_memory_state.map(Into::into),
             custom_data: c.custom_data,
         })
     }
@@ -125,6 +127,7 @@ impl From<Card> for anki_proto::cards::Card {
             original_deck_id: c.original_deck_id.0,
             flags: c.flags as u32,
             original_position: c.original_position.map(Into::into),
+            fsrs_memory_state: c.fsrs_memory_state.map(Into::into),
             custom_data: c.custom_data,
         }
     }
@@ -139,3 +142,21 @@ impl From<anki_proto::cards::CardId> for CardId {
         CardId(cid.cid)
     }
 }
+
+impl From<anki_proto::cards::FsrsMemoryState> for FsrsMemoryState {
+    fn from(value: anki_proto::cards::FsrsMemoryState) -> Self {
+        FsrsMemoryState {
+            stability: value.stability,
+            difficulty: value.difficulty,
+        }
+    }
+}
+
+impl From<FsrsMemoryState> for anki_proto::cards::FsrsMemoryState {
+    fn from(value: FsrsMemoryState) -> Self {
+        anki_proto::cards::FsrsMemoryState {
+            stability: value.stability,
+            difficulty: value.difficulty,
+        }
+    }
+}

@@ -268,6 +268,12 @@ pub(crate) fn ensure_deck_config_values_valid(config: &mut DeckConfigInner) {
         1,
         9999,
     );
+    ensure_f32_valid(
+        &mut config.desired_retention,
+        default.desired_retention,
+        0.8,
+        0.97,
+    );
 }

 fn ensure_f32_valid(val: &mut f32, default: f32, min: f32, max: f32) {

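The body of ensure_f32_valid is outside this hunk; judging from its signature and the call above (desired retention is kept within 0.8..=0.97, falling back to the preset default), a plausible implementation might look like the sketch below. This is a guess for illustration, not the actual Anki function, and the 0.9 default is made up:

```rust
// Sketch of a clamp-to-default helper matching the signature above.
// Assumption: out-of-range (or non-finite) values are replaced with the
// preset default rather than clamped to the nearest bound.
fn ensure_f32_valid(val: &mut f32, default: f32, min: f32, max: f32) {
    if !val.is_finite() || *val < min || *val > max {
        *val = default;
    }
}

fn main() {
    let mut desired_retention = 1.5; // outside the 0.8..=0.97 range used above
    ensure_f32_valid(&mut desired_retention, 0.9, 0.8, 0.97); // 0.9 is illustrative
    assert_eq!(desired_retention, 0.9);
}
```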
@@ -15,6 +15,7 @@ use anki_proto::decks::deck::normal::DayLimit;
 use crate::config::StringKey;
 use crate::decks::NormalDeck;
 use crate::prelude::*;
+use crate::scheduler::fsrs::weights::Weights;
 use crate::search::JoinSearches;
 use crate::search::SearchNode;

@@ -130,6 +131,10 @@ impl Collection {

         // add/update provided configs
         for conf in &mut input.configs {
+            let weight_len = conf.inner.fsrs_weights.len();
+            if weight_len != 0 && weight_len != 17 {
+                return Err(AnkiError::FsrsWeightsInvalid);
+            }
             self.add_or_update_deck_config(conf)?;
             configs_after_update.insert(conf.id, conf.clone());
         }
@@ -154,16 +159,22 @@ impl Collection {
         let usn = self.usn()?;
         let today = self.timing_today()?.days_elapsed;
         let selected_config = input.configs.last().unwrap();
+        let mut decks_needing_memory_recompute: HashMap<DeckConfigId, Vec<SearchNode>> =
+            Default::default();
         for deck in self.storage.get_all_decks()? {
             if let Ok(normal) = deck.normal() {
                 let deck_id = deck.id;

-                // previous order
+                // previous order & weights
                 let previous_config_id = DeckConfigId(normal.config_id);
                 let previous_config = configs_before_update.get(&previous_config_id);
                 let previous_order = previous_config
                     .map(|c| c.inner.new_card_insert_order())
                     .unwrap_or_default();
+                let previous_fsrs_on = previous_config
+                    .map(|c| c.inner.fsrs_enabled)
+                    .unwrap_or_default();
+                let previous_weights = previous_config.map(|c| &c.inner.fsrs_weights);

                 // if a selected (sub)deck, or its old config was removed, update deck to point
                 // to new config
@@ -188,10 +199,38 @@ impl Collection {
                     self.sort_deck(deck_id, current_order, usn)?;
                 }

+                // if weights differ, memory state needs to be recomputed
+                let current_fsrs_on = current_config
+                    .map(|c| c.inner.fsrs_enabled)
+                    .unwrap_or_default();
+                let current_weights = current_config.map(|c| &c.inner.fsrs_weights);
+                if current_fsrs_on && (!previous_fsrs_on || previous_weights != current_weights) {
+                    decks_needing_memory_recompute
+                        .entry(current_config_id)
+                        .or_default()
+                        .push(SearchNode::DeckIdWithoutChildren(deck_id));
+                }
+
                 self.adjust_remaining_steps_in_deck(deck_id, previous_config, current_config, usn)?;
             }
         }

+        if !decks_needing_memory_recompute.is_empty() {
+            let input: Vec<(Weights, Vec<SearchNode>)> = decks_needing_memory_recompute
+                .into_iter()
+                .map(|(conf_id, search)| {
+                    let weights = configs_after_update
+                        .get(&conf_id)
+                        .or_not_found(conf_id)?
+                        .inner
+                        .fsrs_weights
+                        .clone();
+                    Ok((weights, search))
+                })
+                .collect::<Result<_>>()?;
+            self.update_memory_state(input)?;
+        }
+
         self.set_config_string_inner(StringKey::CardStateCustomizer, &input.card_state_customizer)?;
         self.set_config_bool_inner(
             BoolKey::NewCardsIgnoreReviewLimit,

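The recompute pass groups decks whose effective weights changed by their new config, so update_memory_state receives one weight set per batch of deck searches. A simplified, self-contained sketch of that grouping step, with plain integers standing in for DeckConfigId and SearchNode (names and types are illustrative):

```rust
use std::collections::HashMap;

// Simplified stand-ins for DeckConfigId and SearchNode from the hunk above.
type ConfigId = i64;
type DeckId = i64;

/// Group decks that need their FSRS memory state recomputed by the config
/// (and therefore the weight set) they now belong to.
fn group_decks_by_config(changed: &[(ConfigId, DeckId)]) -> HashMap<ConfigId, Vec<DeckId>> {
    let mut groups: HashMap<ConfigId, Vec<DeckId>> = HashMap::new();
    for &(config_id, deck_id) in changed {
        groups.entry(config_id).or_default().push(deck_id);
    }
    groups
}

fn main() {
    let groups = group_decks_by_config(&[(1, 10), (1, 11), (2, 20)]);
    assert_eq!(groups[&1], vec![10, 11]);
    assert_eq!(groups[&2], vec![20]);
}
```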
@@ -113,6 +113,8 @@ pub enum AnkiError {
     },
     InvalidMethodIndex,
     InvalidServiceIndex,
+    FsrsWeightsInvalid,
+    FsrsInsufficientData,
 }

 // error helpers
@@ -164,6 +166,8 @@ impl AnkiError {
             AnkiError::FileIoError { source } => source.message(),
             AnkiError::InvalidInput { source } => source.message(),
             AnkiError::NotFound { source } => source.message(tr),
+            AnkiError::FsrsInsufficientData => tr.deck_config_not_enough_history().into(),
+            AnkiError::FsrsWeightsInvalid => tr.deck_config_invalid_weights().into(),
             #[cfg(windows)]
             AnkiError::WindowsError { source } => format!("{source:?}"),
         }

@ -20,6 +20,9 @@ const LOG_ROTATE_BYTES: u64 = 50 * 1024 * 1024;

/// Enable logging to the console, and optionally also to a file.
pub fn set_global_logger(path: Option<&str>) -> Result<()> {
+     if std::env::var("BURN_LOG").is_ok() {
+         return Ok(());
+     }
    static ONCE: OnceCell<()> = OnceCell::new();
    ONCE.get_or_try_init(|| -> Result<()> {
        let file_writer = if let Some(path) = path {
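The early return above means Anki leaves the global logger untouched whenever the BURN_LOG environment variable is present, so the burn training crates can take over log configuration during FSRS work. A hypothetical way to opt in from debugging code, using only the standard library:

// Hypothetical debugging sketch: set BURN_LOG before logging is initialised so
// burn's own logger is used instead of Anki's console/file logger.
std::env::set_var("BURN_LOG", "info");
set_global_logger(None)?; // returns early because BURN_LOG is set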
@ -6,8 +6,7 @@ use std::sync::Arc;
|
||||||
use std::sync::Mutex;
|
use std::sync::Mutex;
|
||||||
|
|
||||||
use anki_i18n::I18n;
|
use anki_i18n::I18n;
|
||||||
use anki_proto::collection::progress::ComputeRetention;
|
use anki_proto::collection::progress::Value;
|
||||||
use anki_proto::collection::progress::ComputeWeights;
|
|
||||||
|
|
||||||
use crate::dbcheck::DatabaseCheckProgress;
|
use crate::dbcheck::DatabaseCheckProgress;
|
||||||
use crate::error::AnkiError;
|
use crate::error::AnkiError;
|
||||||
|
@ -15,6 +14,7 @@ use crate::error::Result;
|
||||||
use crate::import_export::ExportProgress;
|
use crate::import_export::ExportProgress;
|
||||||
use crate::import_export::ImportProgress;
|
use crate::import_export::ImportProgress;
|
||||||
use crate::prelude::Collection;
|
use crate::prelude::Collection;
|
||||||
|
use crate::scheduler::fsrs::memory_state::ComputeMemoryProgress;
|
||||||
use crate::scheduler::fsrs::retention::ComputeRetentionProgress;
|
use crate::scheduler::fsrs::retention::ComputeRetentionProgress;
|
||||||
use crate::scheduler::fsrs::weights::ComputeWeightsProgress;
|
use crate::scheduler::fsrs::weights::ComputeWeightsProgress;
|
||||||
use crate::sync::collection::normal::NormalSyncProgress;
|
use crate::sync::collection::normal::NormalSyncProgress;
|
||||||
|
@ -133,6 +133,7 @@ pub enum Progress {
|
||||||
Export(ExportProgress),
|
Export(ExportProgress),
|
||||||
ComputeWeights(ComputeWeightsProgress),
|
ComputeWeights(ComputeWeightsProgress),
|
||||||
ComputeRetention(ComputeRetentionProgress),
|
ComputeRetention(ComputeRetentionProgress),
|
||||||
|
ComputeMemory(ComputeMemoryProgress),
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn progress_to_proto(
|
pub(crate) fn progress_to_proto(
|
||||||
|
@ -141,18 +142,12 @@ pub(crate) fn progress_to_proto(
|
||||||
) -> anki_proto::collection::Progress {
|
) -> anki_proto::collection::Progress {
|
||||||
let progress = if let Some(progress) = progress {
|
let progress = if let Some(progress) = progress {
|
||||||
match progress {
|
match progress {
|
||||||
Progress::MediaSync(p) => {
|
Progress::MediaSync(p) => Value::MediaSync(media_sync_progress(p, tr)),
|
||||||
anki_proto::collection::progress::Value::MediaSync(media_sync_progress(p, tr))
|
Progress::MediaCheck(n) => Value::MediaCheck(tr.media_check_checked(n.checked).into()),
|
||||||
}
|
Progress::FullSync(p) => Value::FullSync(anki_proto::collection::progress::FullSync {
|
||||||
Progress::MediaCheck(n) => anki_proto::collection::progress::Value::MediaCheck(
|
|
||||||
tr.media_check_checked(n.checked).into(),
|
|
||||||
),
|
|
||||||
Progress::FullSync(p) => anki_proto::collection::progress::Value::FullSync(
|
|
||||||
anki_proto::collection::progress::FullSync {
|
|
||||||
transferred: p.transferred_bytes as u32,
|
transferred: p.transferred_bytes as u32,
|
||||||
total: p.total_bytes as u32,
|
total: p.total_bytes as u32,
|
||||||
},
|
}),
|
||||||
),
|
|
||||||
Progress::NormalSync(p) => {
|
Progress::NormalSync(p) => {
|
||||||
let stage = match p.stage {
|
let stage = match p.stage {
|
||||||
SyncStage::Connecting => tr.sync_syncing(),
|
SyncStage::Connecting => tr.sync_syncing(),
|
||||||
|
@ -166,13 +161,11 @@ pub(crate) fn progress_to_proto(
|
||||||
let removed = tr
|
let removed = tr
|
||||||
.sync_media_removed_count(p.local_remove, p.remote_remove)
|
.sync_media_removed_count(p.local_remove, p.remote_remove)
|
||||||
.into();
|
.into();
|
||||||
anki_proto::collection::progress::Value::NormalSync(
|
Value::NormalSync(anki_proto::collection::progress::NormalSync {
|
||||||
anki_proto::collection::progress::NormalSync {
|
|
||||||
stage,
|
stage,
|
||||||
added,
|
added,
|
||||||
removed,
|
removed,
|
||||||
},
|
})
|
||||||
)
|
|
||||||
}
|
}
|
||||||
Progress::DatabaseCheck(p) => {
|
Progress::DatabaseCheck(p) => {
|
||||||
let mut stage_total = 0;
|
let mut stage_total = 0;
|
||||||
|
@ -189,15 +182,13 @@ pub(crate) fn progress_to_proto(
|
||||||
DatabaseCheckProgress::History => tr.database_check_checking_history(),
|
DatabaseCheckProgress::History => tr.database_check_checking_history(),
|
||||||
}
|
}
|
||||||
.to_string();
|
.to_string();
|
||||||
anki_proto::collection::progress::Value::DatabaseCheck(
|
Value::DatabaseCheck(anki_proto::collection::progress::DatabaseCheck {
|
||||||
anki_proto::collection::progress::DatabaseCheck {
|
|
||||||
stage,
|
stage,
|
||||||
stage_total: stage_total as u32,
|
stage_total: stage_total as u32,
|
||||||
stage_current: stage_current as u32,
|
stage_current: stage_current as u32,
|
||||||
},
|
})
|
||||||
)
|
|
||||||
}
|
}
|
||||||
Progress::Import(progress) => anki_proto::collection::progress::Value::Importing(
|
Progress::Import(progress) => Value::Importing(
|
||||||
match progress {
|
match progress {
|
||||||
ImportProgress::File => tr.importing_importing_file(),
|
ImportProgress::File => tr.importing_importing_file(),
|
||||||
ImportProgress::Media(n) => tr.importing_processed_media_file(n),
|
ImportProgress::Media(n) => tr.importing_processed_media_file(n),
|
||||||
|
@ -208,7 +199,7 @@ pub(crate) fn progress_to_proto(
|
||||||
}
|
}
|
||||||
.into(),
|
.into(),
|
||||||
),
|
),
|
||||||
Progress::Export(progress) => anki_proto::collection::progress::Value::Exporting(
|
Progress::Export(progress) => Value::Exporting(
|
||||||
match progress {
|
match progress {
|
||||||
ExportProgress::File => tr.exporting_exporting_file(),
|
ExportProgress::File => tr.exporting_exporting_file(),
|
||||||
ExportProgress::Media(n) => tr.exporting_processed_media_files(n),
|
ExportProgress::Media(n) => tr.exporting_processed_media_files(n),
|
||||||
|
@ -219,21 +210,30 @@ pub(crate) fn progress_to_proto(
|
||||||
.into(),
|
.into(),
|
||||||
),
|
),
|
||||||
Progress::ComputeWeights(progress) => {
|
Progress::ComputeWeights(progress) => {
|
||||||
anki_proto::collection::progress::Value::ComputeWeights(ComputeWeights {
|
Value::ComputeWeights(anki_proto::collection::ComputeWeightsProgress {
|
||||||
current: progress.current,
|
current: progress.current,
|
||||||
total: progress.total,
|
total: progress.total,
|
||||||
revlog_entries: progress.revlog_entries,
|
fsrs_items: progress.fsrs_items,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
Progress::ComputeRetention(progress) => {
|
Progress::ComputeRetention(progress) => {
|
||||||
anki_proto::collection::progress::Value::ComputeRetention(ComputeRetention {
|
Value::ComputeRetention(anki_proto::collection::ComputeRetentionProgress {
|
||||||
current: progress.current,
|
current: progress.current,
|
||||||
total: progress.total,
|
total: progress.total,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
Progress::ComputeMemory(progress) => {
|
||||||
|
Value::ComputeMemory(anki_proto::collection::ComputeMemoryProgress {
|
||||||
|
current_cards: progress.current_cards,
|
||||||
|
total_cards: progress.total_cards,
|
||||||
|
label: tr
|
||||||
|
.deck_config_updating_cards(progress.current_cards, progress.total_cards)
|
||||||
|
.into(),
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
anki_proto::collection::progress::Value::None(anki_proto::generic::Empty {})
|
Value::None(anki_proto::generic::Empty {})
|
||||||
};
|
};
|
||||||
anki_proto::collection::Progress {
|
anki_proto::collection::Progress {
|
||||||
value: Some(progress),
|
value: Some(progress),
|
||||||
|
@ -306,6 +306,12 @@ impl From<ComputeRetentionProgress> for Progress {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<ComputeMemoryProgress> for Progress {
|
||||||
|
fn from(p: ComputeMemoryProgress) -> Self {
|
||||||
|
Progress::ComputeMemory(p)
|
||||||
|
}
|
||||||
|
}
|
||||||
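Wiring up a new progress kind follows the same three-step pattern each time: a payload struct, a From impl into Progress (as above), and a match arm in progress_to_proto. A condensed sketch of how collection code then reports it; the loop variables are illustrative, but new_progress_handler and update are the calls used elsewhere in this commit:

// Throttled progress reporting from a long-running operation.
let total_cards: u32 = 1234; // illustrative
let mut progress = col.new_progress_handler::<ComputeMemoryProgress>();
progress.update(false, |p| p.total_cards = total_cards)?;
for idx in 0..total_cards {
    // Passing `true` lets the handler throttle updates; the `?` propagates an
    // error if the operation was cancelled.
    progress.update(true, |p| p.current_cards = idx + 1)?;
    // ... per-card work ...
}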
|
|
||||||
impl Collection {
|
impl Collection {
|
||||||
pub fn new_progress_handler<P: Into<Progress> + Default + Clone>(
|
pub fn new_progress_handler<P: Into<Progress> + Default + Clone>(
|
||||||
&self,
|
&self,
|
||||||
|
|
|
@ -32,7 +32,7 @@ impl From<TimestampMillis> for RevlogId {
}
}

- #[derive(Serialize_tuple, Deserialize, Debug, Default, PartialEq, Eq)]
+ #[derive(Serialize_tuple, Deserialize, Debug, Default, PartialEq, Eq, Clone)]
pub struct RevlogEntry {
    pub id: RevlogId,
    pub cid: CardId,
@ -63,6 +63,7 @@ impl CardStateUpdater {
|
||||||
let lapses = self.card.lapses;
|
let lapses = self.card.lapses;
|
||||||
let ease_factor = self.card.ease_factor();
|
let ease_factor = self.card.ease_factor();
|
||||||
let remaining_steps = self.card.remaining_steps();
|
let remaining_steps = self.card.remaining_steps();
|
||||||
|
let fsrs_memory_state = self.card.fsrs_memory_state;
|
||||||
|
|
||||||
match self.card.ctype {
|
match self.card.ctype {
|
||||||
CardType::New => NormalState::New(NewState {
|
CardType::New => NormalState::New(NewState {
|
||||||
|
@ -72,6 +73,7 @@ impl CardStateUpdater {
|
||||||
LearnState {
|
LearnState {
|
||||||
scheduled_secs: self.learn_steps().current_delay_secs(remaining_steps),
|
scheduled_secs: self.learn_steps().current_delay_secs(remaining_steps),
|
||||||
remaining_steps,
|
remaining_steps,
|
||||||
|
fsrs_memory_state,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
.into(),
|
.into(),
|
||||||
|
@ -82,12 +84,14 @@ impl CardStateUpdater {
|
||||||
ease_factor,
|
ease_factor,
|
||||||
lapses,
|
lapses,
|
||||||
leeched: false,
|
leeched: false,
|
||||||
|
fsrs_memory_state,
|
||||||
}
|
}
|
||||||
.into(),
|
.into(),
|
||||||
CardType::Relearn => RelearnState {
|
CardType::Relearn => RelearnState {
|
||||||
learning: LearnState {
|
learning: LearnState {
|
||||||
scheduled_secs: self.relearn_steps().current_delay_secs(remaining_steps),
|
scheduled_secs: self.relearn_steps().current_delay_secs(remaining_steps),
|
||||||
remaining_steps,
|
remaining_steps,
|
||||||
|
fsrs_memory_state,
|
||||||
},
|
},
|
||||||
review: ReviewState {
|
review: ReviewState {
|
||||||
scheduled_days: interval,
|
scheduled_days: interval,
|
||||||
|
@ -95,6 +99,7 @@ impl CardStateUpdater {
|
||||||
ease_factor,
|
ease_factor,
|
||||||
lapses,
|
lapses,
|
||||||
leeched: false,
|
leeched: false,
|
||||||
|
fsrs_memory_state,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
.into(),
|
.into(),
|
||||||
|
|
|
@ -24,6 +24,7 @@ impl CardStateUpdater {
|
||||||
self.card.queue = CardQueue::New;
|
self.card.queue = CardQueue::New;
|
||||||
self.card.due = next.position as i32;
|
self.card.due = next.position as i32;
|
||||||
self.card.original_position = None;
|
self.card.original_position = None;
|
||||||
|
self.card.fsrs_memory_state = None;
|
||||||
|
|
||||||
RevlogEntryPartial::new(current, next.into(), 0.0, self.secs_until_rollover())
|
RevlogEntryPartial::new(current, next.into(), 0.0, self.secs_until_rollover())
|
||||||
}
|
}
|
||||||
|
@ -38,6 +39,7 @@ impl CardStateUpdater {
|
||||||
if let Some(position) = current.new_position() {
|
if let Some(position) = current.new_position() {
|
||||||
self.card.original_position = Some(position)
|
self.card.original_position = Some(position)
|
||||||
}
|
}
|
||||||
|
self.card.fsrs_memory_state = next.fsrs_memory_state;
|
||||||
|
|
||||||
let interval = next
|
let interval = next
|
||||||
.interval_kind()
|
.interval_kind()
|
||||||
|
|
|
@ -8,6 +8,9 @@ mod relearning;
|
||||||
mod review;
|
mod review;
|
||||||
mod revlog;
|
mod revlog;
|
||||||
|
|
||||||
|
use fsrs::MemoryState;
|
||||||
|
use fsrs::NextStates;
|
||||||
|
use fsrs::FSRS;
|
||||||
use rand::prelude::*;
|
use rand::prelude::*;
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use revlog::RevlogEntryPartial;
|
use revlog::RevlogEntryPartial;
|
||||||
|
@ -22,10 +25,13 @@ use super::states::StateContext;
|
||||||
use super::timespan::answer_button_time_collapsible;
|
use super::timespan::answer_button_time_collapsible;
|
||||||
use super::timing::SchedTimingToday;
|
use super::timing::SchedTimingToday;
|
||||||
use crate::card::CardQueue;
|
use crate::card::CardQueue;
|
||||||
|
use crate::card::CardType;
|
||||||
use crate::deckconfig::DeckConfig;
|
use crate::deckconfig::DeckConfig;
|
||||||
use crate::deckconfig::LeechAction;
|
use crate::deckconfig::LeechAction;
|
||||||
use crate::decks::Deck;
|
use crate::decks::Deck;
|
||||||
use crate::prelude::*;
|
use crate::prelude::*;
|
||||||
|
use crate::scheduler::fsrs::weights::fsrs_items_for_memory_state;
|
||||||
|
use crate::search::SearchNode;
|
||||||
|
|
||||||
#[derive(Copy, Clone)]
|
#[derive(Copy, Clone)]
|
||||||
pub enum Rating {
|
pub enum Rating {
|
||||||
|
@ -60,6 +66,8 @@ struct CardStateUpdater {
|
||||||
timing: SchedTimingToday,
|
timing: SchedTimingToday,
|
||||||
now: TimestampSecs,
|
now: TimestampSecs,
|
||||||
fuzz_seed: Option<u64>,
|
fuzz_seed: Option<u64>,
|
||||||
|
/// Set if FSRS is enabled.
|
||||||
|
fsrs_next_states: Option<NextStates>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CardStateUpdater {
|
impl CardStateUpdater {
|
||||||
|
@ -87,6 +95,7 @@ impl CardStateUpdater {
|
||||||
} else {
|
} else {
|
||||||
0
|
0
|
||||||
},
|
},
|
||||||
|
fsrs_next_states: self.fsrs_next_states.clone(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -342,6 +351,29 @@ impl Collection {
|
||||||
.get_deck(card.deck_id)?
|
.get_deck(card.deck_id)?
|
||||||
.or_not_found(card.deck_id)?;
|
.or_not_found(card.deck_id)?;
|
||||||
let config = self.home_deck_config(deck.config_id(), card.original_deck_id)?;
|
let config = self.home_deck_config(deck.config_id(), card.original_deck_id)?;
|
||||||
|
let fsrs_next_states = if config.inner.fsrs_enabled {
|
||||||
|
let fsrs = FSRS::new(Some(&config.inner.fsrs_weights))?;
|
||||||
|
let memory_state = if let Some(state) = card.fsrs_memory_state {
|
||||||
|
Some(MemoryState::from(state))
|
||||||
|
} else if card.ctype == CardType::New {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
// Card has been moved or imported into an FSRS deck after weights were set,
|
||||||
|
// and will need its initial memory state to be calculated based on review
|
||||||
|
// history.
|
||||||
|
let revlog = self.revlog_for_srs(SearchNode::CardIds(card.id.to_string()))?;
|
||||||
|
let mut fsrs_items = fsrs_items_for_memory_state(revlog, timing.next_day_at);
|
||||||
|
fsrs_items.pop().map(|(_cid, item)| fsrs.memory_state(item))
|
||||||
|
};
|
||||||
|
Some(fsrs.next_states(
|
||||||
|
memory_state,
|
||||||
|
config.inner.desired_retention,
|
||||||
|
card.days_since_last_review(&timing).unwrap_or_default(),
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
Ok(CardStateUpdater {
|
Ok(CardStateUpdater {
|
||||||
fuzz_seed: get_fuzz_seed(&card),
|
fuzz_seed: get_fuzz_seed(&card),
|
||||||
card,
|
card,
|
||||||
|
@ -349,6 +381,7 @@ impl Collection {
|
||||||
config,
|
config,
|
||||||
timing,
|
timing,
|
||||||
now: TimestampSecs::now(),
|
now: TimestampSecs::now(),
|
||||||
|
fsrs_next_states,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
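Pulling the branches above together, the per-answer FSRS flow is: build an FSRS instance from the preset's weights, resolve a memory state (stored on the card, absent for new cards, or rebuilt from the review log), then ask the fsrs crate for the four next states at the preset's desired retention. A compressed sketch using only the calls visible in this hunk, with error handling trimmed:

// memory_state: Option<MemoryState>, resolved as in the hunk above.
let fsrs = FSRS::new(Some(&config.inner.fsrs_weights))?;
let next = fsrs.next_states(
    memory_state,
    config.inner.desired_retention,
    card.days_since_last_review(&timing).unwrap_or_default(),
);
// next.again / hard / good / easy each carry an interval and a memory state;
// the Learn/Relearn/Review state builders below copy them onto the card.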
|
|
||||||
|
|
|
@ -23,6 +23,7 @@ impl CardStateUpdater {
|
||||||
if let Some(position) = current.new_position() {
|
if let Some(position) = current.new_position() {
|
||||||
self.card.original_position = Some(position)
|
self.card.original_position = Some(position)
|
||||||
}
|
}
|
||||||
|
self.card.fsrs_memory_state = next.learning.fsrs_memory_state;
|
||||||
|
|
||||||
let interval = next
|
let interval = next
|
||||||
.interval_kind()
|
.interval_kind()
|
||||||
|
|
|
@ -24,6 +24,7 @@ impl CardStateUpdater {
|
||||||
if let Some(position) = current.new_position() {
|
if let Some(position) = current.new_position() {
|
||||||
self.card.original_position = Some(position)
|
self.card.original_position = Some(position)
|
||||||
}
|
}
|
||||||
|
self.card.fsrs_memory_state = next.fsrs_memory_state;
|
||||||
|
|
||||||
RevlogEntryPartial::new(
|
RevlogEntryPartial::new(
|
||||||
current,
|
current,
|
||||||
|
|
|
@ -1,20 +1,16 @@
|
||||||
// Copyright: Ankitects Pty Ltd and contributors
|
// Copyright: Ankitects Pty Ltd and contributors
|
||||||
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||||
use fsrs_optimizer::FSRSError;
|
|
||||||
|
use fsrs::FSRSError;
|
||||||
|
|
||||||
use crate::error::AnkiError;
|
use crate::error::AnkiError;
|
||||||
use crate::error::InvalidInputError;
|
|
||||||
|
|
||||||
impl From<FSRSError> for AnkiError {
|
impl From<FSRSError> for AnkiError {
|
||||||
fn from(err: FSRSError) -> Self {
|
fn from(err: FSRSError) -> Self {
|
||||||
match err {
|
match err {
|
||||||
FSRSError::NotEnoughData => InvalidInputError {
|
FSRSError::NotEnoughData => AnkiError::FsrsInsufficientData,
|
||||||
message: "Not enough data available".to_string(),
|
|
||||||
source: None,
|
|
||||||
backtrace: None,
|
|
||||||
}
|
|
||||||
.into(),
|
|
||||||
FSRSError::Interrupted => AnkiError::Interrupted,
|
FSRSError::Interrupted => AnkiError::Interrupted,
|
||||||
|
FSRSError::InvalidWeights => AnkiError::FsrsWeightsInvalid,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
49	rslib/src/scheduler/fsrs/memory_state.rs	Normal file
@ -0,0 +1,49 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use fsrs::FSRS;

use crate::prelude::*;
use crate::scheduler::fsrs::weights::fsrs_items_for_memory_state;
use crate::scheduler::fsrs::weights::Weights;
use crate::search::JoinSearches;
use crate::search::Negated;
use crate::search::SearchNode;
use crate::search::StateKind;

#[derive(Debug, Clone, Copy, Default)]
pub struct ComputeMemoryProgress {
    pub current_cards: u32,
    pub total_cards: u32,
}

impl Collection {
    /// For each provided set of weights, locate cards with the provided search,
    /// and update their memory state.
    /// Should be called inside a transaction.
    pub(crate) fn update_memory_state(
        &mut self,
        entries: Vec<(Weights, Vec<SearchNode>)>,
    ) -> Result<()> {
        let timing = self.timing_today()?;
        let usn = self.usn()?;
        for (weights, search) in entries {
            let search = SearchBuilder::any(search.into_iter())
                .and(SearchNode::State(StateKind::New).negated());
            let revlog = self.revlog_for_srs(search)?;
            let items = fsrs_items_for_memory_state(revlog, timing.next_day_at);
            let fsrs = FSRS::new(Some(&weights))?;
            let mut progress = self.new_progress_handler::<ComputeMemoryProgress>();
            progress.update(false, |s| s.total_cards = items.len() as u32)?;
            for (idx, (card_id, item)) in items.into_iter().enumerate() {
                progress.update(true, |state| state.current_cards = idx as u32 + 1)?;
                let state = fsrs.memory_state(item);
                let mut card = self.storage.get_card(card_id)?.or_not_found(card_id)?;
                let original = card.clone();
                card.fsrs_memory_state = Some(state.into());
                self.update_card_inner(&mut card, original, usn)?;
            }
        }
        Ok(())
    }
}
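For context, the only caller added in this commit is the deck-options update path shown earlier, which groups decks by their new config and passes that config's weights. A hedged sketch of such a call; col, new_config and deck_id are placeholders, and the surrounding transaction is assumed rather than shown here:

// One entry per preset whose weights changed: the weights, plus searches that
// limit the update to decks currently using that preset.
let input: Vec<(Weights, Vec<SearchNode>)> = vec![(
    new_config.inner.fsrs_weights.clone(),
    vec![SearchNode::DeckIdWithoutChildren(deck_id)],
)];
col.update_memory_state(input)?;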
@ -1,5 +1,7 @@
|
||||||
// Copyright: Ankitects Pty Ltd and contributors
|
// Copyright: Ankitects Pty Ltd and contributors
|
||||||
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||||
mod error;
|
mod error;
|
||||||
|
pub mod memory_state;
|
||||||
pub mod retention;
|
pub mod retention;
|
||||||
|
pub mod try_collect;
|
||||||
pub mod weights;
|
pub mod weights;
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
// Copyright: Ankitects Pty Ltd and contributors
|
// Copyright: Ankitects Pty Ltd and contributors
|
||||||
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||||
|
|
||||||
use anki_proto::scheduler::ComputeOptimalRetentionRequest;
|
use anki_proto::scheduler::ComputeOptimalRetentionRequest;
|
||||||
use fsrs_optimizer::find_optimal_retention;
|
use fsrs::SimulatorConfig;
|
||||||
use fsrs_optimizer::SimulatorConfig;
|
use fsrs::FSRS;
|
||||||
use itertools::Itertools;
|
|
||||||
|
|
||||||
use crate::prelude::*;
|
use crate::prelude::*;
|
||||||
|
|
||||||
|
@ -19,24 +19,33 @@ impl Collection {
|
||||||
req: ComputeOptimalRetentionRequest,
|
req: ComputeOptimalRetentionRequest,
|
||||||
) -> Result<f32> {
|
) -> Result<f32> {
|
||||||
let mut anki_progress = self.new_progress_handler::<ComputeRetentionProgress>();
|
let mut anki_progress = self.new_progress_handler::<ComputeRetentionProgress>();
|
||||||
if req.weights.len() != 17 {
|
let fsrs = FSRS::new(None)?;
|
||||||
invalid_input!("must have 17 weights");
|
Ok(fsrs.optimal_retention(
|
||||||
}
|
|
||||||
let mut weights = [0f64; 17];
|
|
||||||
weights
|
|
||||||
.iter_mut()
|
|
||||||
.set_from(req.weights.into_iter().map(|v| v as f64));
|
|
||||||
Ok(find_optimal_retention(
|
|
||||||
&SimulatorConfig {
|
&SimulatorConfig {
|
||||||
w: weights,
|
|
||||||
deck_size: req.deck_size as usize,
|
deck_size: req.deck_size as usize,
|
||||||
learn_span: req.days_to_simulate as usize,
|
learn_span: req.days_to_simulate as usize,
|
||||||
max_cost_perday: req.max_seconds_of_study_per_day as f64,
|
max_cost_perday: req.max_seconds_of_study_per_day as f64,
|
||||||
max_ivl: req.max_interval as f64,
|
max_ivl: req.max_interval as f64,
|
||||||
recall_cost: req.recall_secs as f64,
|
recall_costs: [
|
||||||
|
req.recall_secs_hard,
|
||||||
|
req.recall_secs_good,
|
||||||
|
req.recall_secs_easy,
|
||||||
|
],
|
||||||
forget_cost: req.forget_secs as f64,
|
forget_cost: req.forget_secs as f64,
|
||||||
learn_cost: req.learn_secs as f64,
|
learn_cost: req.learn_secs as f64,
|
||||||
|
first_rating_prob: [
|
||||||
|
req.first_rating_probability_again,
|
||||||
|
req.first_rating_probability_hard,
|
||||||
|
req.first_rating_probability_good,
|
||||||
|
req.first_rating_probability_easy,
|
||||||
|
],
|
||||||
|
review_rating_prob: [
|
||||||
|
req.review_rating_probability_hard,
|
||||||
|
req.review_rating_probability_good,
|
||||||
|
req.review_rating_probability_easy,
|
||||||
|
],
|
||||||
},
|
},
|
||||||
|
&req.weights,
|
||||||
|ip| {
|
|ip| {
|
||||||
anki_progress
|
anki_progress
|
||||||
.update(false, |p| {
|
.update(false, |p| {
|
||||||
34	rslib/src/scheduler/fsrs/try_collect.rs	Normal file
@ -0,0 +1,34 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use crate::error::AnkiError;
use crate::invalid_input;

// Roll our own implementation until this becomes stable
// https://github.com/rust-lang/rust/issues/94047
#[allow(unused)]
pub(crate) trait TryCollect: ExactSizeIterator {
    fn try_collect<const N: usize>(self) -> Result<[Self::Item; N], AnkiError>
    where
        // Self: Sized,
        Self::Item: Copy + Default;
}

impl<I, T> TryCollect for I
where
    I: ExactSizeIterator<Item = T>,
    T: Copy + Default,
{
    fn try_collect<const N: usize>(self) -> Result<[T; N], AnkiError> {
        if self.len() != N {
            invalid_input!("expected {N}; got {}", self.len());
        }

        let mut result = [T::default(); N];
        for (index, value) in self.enumerate() {
            result[index] = value;
        }

        Ok(result)
    }
}
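A hypothetical use of the helper above (not part of the diff): turning a weights vector into the fixed-size array some of the FSRS plumbing expects, with the length check built in. The 17-element size matches the weight count referenced elsewhere in this commit.

use crate::error::AnkiError;
use crate::scheduler::fsrs::try_collect::TryCollect;

fn weights_to_array(weights: &[f32]) -> Result<[f32; 17], AnkiError> {
    // Fails with "expected 17; got N" instead of silently truncating or padding.
    weights.iter().copied().try_collect::<17>()
}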
@ -4,11 +4,12 @@ use std::iter;
|
||||||
use std::thread;
|
use std::thread;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use fsrs_optimizer::compute_weights;
|
use anki_proto::scheduler::ComputeFsrsWeightsResponse;
|
||||||
use fsrs_optimizer::evaluate;
|
use fsrs::FSRSItem;
|
||||||
use fsrs_optimizer::FSRSItem;
|
use fsrs::FSRSReview;
|
||||||
use fsrs_optimizer::FSRSReview;
|
use fsrs::ModelEvaluation;
|
||||||
use fsrs_optimizer::ProgressState;
|
use fsrs::ProgressState;
|
||||||
|
use fsrs::FSRS;
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
|
|
||||||
use crate::prelude::*;
|
use crate::prelude::*;
|
||||||
|
@ -16,17 +17,16 @@ use crate::revlog::RevlogEntry;
|
||||||
use crate::revlog::RevlogReviewKind;
|
use crate::revlog::RevlogReviewKind;
|
||||||
use crate::search::SortMode;
|
use crate::search::SortMode;
|
||||||
|
|
||||||
|
pub(crate) type Weights = Vec<f32>;
|
||||||
|
|
||||||
impl Collection {
|
impl Collection {
|
||||||
pub fn compute_weights(&mut self, search: &str) -> Result<Vec<f32>> {
|
pub fn compute_weights(&mut self, search: &str) -> Result<ComputeFsrsWeightsResponse> {
|
||||||
let timing = self.timing_today()?;
|
let timing = self.timing_today()?;
|
||||||
|
let revlogs = self.revlog_for_srs(search)?;
|
||||||
|
let items = fsrs_items_for_training(revlogs, timing.next_day_at);
|
||||||
|
let fsrs_items = items.len() as u32;
|
||||||
let mut anki_progress = self.new_progress_handler::<ComputeWeightsProgress>();
|
let mut anki_progress = self.new_progress_handler::<ComputeWeightsProgress>();
|
||||||
let guard = self.search_cards_into_table(search, SortMode::NoOrder)?;
|
anki_progress.update(false, |p| p.fsrs_items = fsrs_items)?;
|
||||||
let revlogs = guard
|
|
||||||
.col
|
|
||||||
.storage
|
|
||||||
.get_revlog_entries_for_searched_cards_in_order()?;
|
|
||||||
anki_progress.state.revlog_entries = revlogs.len() as u32;
|
|
||||||
let items = anki_to_fsrs(revlogs, timing.next_day_at);
|
|
||||||
// adapt the progress handler to our built-in progress handling
|
// adapt the progress handler to our built-in progress handling
|
||||||
let progress = ProgressState::new_shared();
|
let progress = ProgressState::new_shared();
|
||||||
let progress2 = progress.clone();
|
let progress2 = progress.clone();
|
||||||
|
@ -45,26 +45,36 @@ impl Collection {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
compute_weights(items, Some(progress2)).map_err(Into::into)
|
let fsrs = FSRS::new(None)?;
|
||||||
|
let weights = fsrs.compute_weights(items, Some(progress2))?;
|
||||||
|
Ok(ComputeFsrsWeightsResponse {
|
||||||
|
weights,
|
||||||
|
fsrs_items,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
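compute_weights now reports how many FSRS items the training set contained, which is what the commit's "limited review history" warning is based on. A hedged usage sketch; the search string and threshold are illustrative, the response fields are the ones constructed just above, and the actual warning logic is outside this hunk:

let resp = col.compute_weights("deck:Default")?;
if resp.fsrs_items < 1000 {
    // Illustrative threshold only.
    println!("weights were trained on only {} review items", resp.fsrs_items);
}
let weights: Vec<f32> = resp.weights;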
|
|
||||||
pub fn evaluate_weights(&mut self, weights: &[f32], search: &str) -> Result<(f32, f32)> {
|
pub(crate) fn revlog_for_srs(
|
||||||
let timing = self.timing_today()?;
|
&mut self,
|
||||||
if weights.len() != 17 {
|
search: impl TryIntoSearch,
|
||||||
invalid_input!("must have 17 weights");
|
) -> Result<Vec<RevlogEntry>> {
|
||||||
|
self.search_cards_into_table(search, SortMode::NoOrder)?
|
||||||
|
.col
|
||||||
|
.storage
|
||||||
|
.get_revlog_entries_for_searched_cards_in_order()
|
||||||
}
|
}
|
||||||
let mut weights_arr = [0f32; 17];
|
|
||||||
weights_arr.iter_mut().set_from(weights.iter().cloned());
|
pub fn evaluate_weights(&mut self, weights: &Weights, search: &str) -> Result<ModelEvaluation> {
|
||||||
|
let timing = self.timing_today()?;
|
||||||
let mut anki_progress = self.new_progress_handler::<ComputeWeightsProgress>();
|
let mut anki_progress = self.new_progress_handler::<ComputeWeightsProgress>();
|
||||||
let guard = self.search_cards_into_table(search, SortMode::NoOrder)?;
|
let guard = self.search_cards_into_table(search, SortMode::NoOrder)?;
|
||||||
let revlogs = guard
|
let revlogs = guard
|
||||||
.col
|
.col
|
||||||
.storage
|
.storage
|
||||||
.get_revlog_entries_for_searched_cards_in_order()?;
|
.get_revlog_entries_for_searched_cards_in_order()?;
|
||||||
anki_progress.state.revlog_entries = revlogs.len() as u32;
|
anki_progress.state.fsrs_items = revlogs.len() as u32;
|
||||||
let items = anki_to_fsrs(revlogs, timing.next_day_at);
|
let items = fsrs_items_for_training(revlogs, timing.next_day_at);
|
||||||
|
let fsrs = FSRS::new(Some(weights))?;
|
||||||
Ok(evaluate(weights_arr, items, |ip| {
|
Ok(fsrs.evaluate(items, |ip| {
|
||||||
anki_progress
|
anki_progress
|
||||||
.update(false, |p| {
|
.update(false, |p| {
|
||||||
p.total = ip.total as u32;
|
p.total = ip.total as u32;
|
||||||
|
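evaluate_weights now returns the fsrs crate's ModelEvaluation rather than a bare tuple; the service-layer hunk further down reads its log_loss and rmse_bins fields. A minimal caller sketch with an illustrative search string:

let eval = col.evaluate_weights(&weights, "deck:Default")?;
// Lower is better for both metrics; rmse_bins is the bin-weighted RMSE reported
// by the FSRS optimizer.
println!("log loss {:.4}, RMSE(bins) {:.4}", eval.log_loss, eval.rmse_bins);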
@ -79,56 +89,87 @@ impl Collection {
|
||||||
pub struct ComputeWeightsProgress {
|
pub struct ComputeWeightsProgress {
|
||||||
pub current: u32,
|
pub current: u32,
|
||||||
pub total: u32,
|
pub total: u32,
|
||||||
pub revlog_entries: u32,
|
pub fsrs_items: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Convert a series of revlog entries sorted by card id into FSRS items.
|
/// Convert a series of revlog entries sorted by card id into FSRS items.
|
||||||
fn anki_to_fsrs(revlogs: Vec<RevlogEntry>, next_day_at: TimestampSecs) -> Vec<FSRSItem> {
|
fn fsrs_items_for_training(revlogs: Vec<RevlogEntry>, next_day_at: TimestampSecs) -> Vec<FSRSItem> {
|
||||||
let mut revlogs = revlogs
|
let mut revlogs = revlogs
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.group_by(|r| r.cid)
|
.group_by(|r| r.cid)
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter_map(|(_cid, entries)| single_card_revlog_to_items(entries.collect(), next_day_at))
|
.filter_map(|(_cid, entries)| {
|
||||||
|
single_card_revlog_to_items(entries.collect(), next_day_at, true)
|
||||||
|
})
|
||||||
.flatten()
|
.flatten()
|
||||||
.collect_vec();
|
.collect_vec();
|
||||||
revlogs.sort_by_cached_key(|r| r.reviews.len());
|
revlogs.sort_by_cached_key(|r| r.reviews.len());
|
||||||
revlogs
|
revlogs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// When updating memory state, FSRS only requires the last FSRSItem that
|
||||||
|
/// contains the full history.
|
||||||
|
pub(crate) fn fsrs_items_for_memory_state(
|
||||||
|
revlogs: Vec<RevlogEntry>,
|
||||||
|
next_day_at: TimestampSecs,
|
||||||
|
) -> Vec<(CardId, FSRSItem)> {
|
||||||
|
let mut out = vec![];
|
||||||
|
for (card_id, group) in revlogs.into_iter().group_by(|r| r.cid).into_iter() {
|
||||||
|
let entries = group.into_iter().collect_vec();
|
||||||
|
if let Some(mut items) = single_card_revlog_to_items(entries, next_day_at, false) {
|
||||||
|
if let Some(item) = items.pop() {
|
||||||
|
out.push((card_id, item));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
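As a concrete illustration of the two conversions (the literals mirror the delta_t test further down and are examples only): for a card learned on day 0 and reviewed again 2 days later and then 3 days after that, training produces the growing prefixes minus the lone first step, while the memory-state path keeps every prefix and only the final, full item per card is used.

// Training items: [r(0), r(2)] and [r(0), r(2), r(3)].
// Memory-state item actually consumed: the full history below.
let full_item = FSRSItem {
    reviews: vec![
        FSRSReview { rating: 3, delta_t: 0 },
        FSRSReview { rating: 3, delta_t: 2 },
        FSRSReview { rating: 3, delta_t: 3 },
    ],
};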
|
|
||||||
|
/// Transform the revlog history for a card into a list of FSRSItems. FSRS
|
||||||
|
/// expects multiple items for a given card when training - for revlog
|
||||||
|
/// `[1,2,3]`, we create FSRSItems corresponding to `[1,2]` and `[1,2,3]`
|
||||||
|
/// in training, and `[1]`, `[1,2]` and `[1,2,3]` when calculating memory
|
||||||
|
/// state.
|
||||||
fn single_card_revlog_to_items(
|
fn single_card_revlog_to_items(
|
||||||
mut entries: Vec<RevlogEntry>,
|
mut entries: Vec<RevlogEntry>,
|
||||||
next_day_at: TimestampSecs,
|
next_day_at: TimestampSecs,
|
||||||
|
training: bool,
|
||||||
) -> Option<Vec<FSRSItem>> {
|
) -> Option<Vec<FSRSItem>> {
|
||||||
// Find the index of the first learn entry in the last continuous group
|
let mut last_learn_entry = None;
|
||||||
let mut index_to_keep = 0;
|
for (index, entry) in entries.iter().enumerate().rev() {
|
||||||
let mut i = entries.len();
|
if entry.review_kind == RevlogReviewKind::Learning {
|
||||||
|
last_learn_entry = Some(index);
|
||||||
while i > 0 {
|
} else if last_learn_entry.is_some() {
|
||||||
i -= 1;
|
|
||||||
if entries[i].review_kind == RevlogReviewKind::Learning {
|
|
||||||
index_to_keep = i;
|
|
||||||
} else if index_to_keep != 0 {
|
|
||||||
// Found a continuous group
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
let first_relearn = entries
|
||||||
// Remove all entries before this one
|
.iter()
|
||||||
entries.drain(..index_to_keep);
|
.enumerate()
|
||||||
|
.find(|(_idx, e)| e.review_kind == RevlogReviewKind::Relearning)
|
||||||
// we ignore cards that don't start in the learning state
|
.map(|(idx, _)| idx);
|
||||||
if let Some(entry) = entries.first() {
|
if let Some(idx) = last_learn_entry.or(first_relearn) {
|
||||||
if entry.review_kind != RevlogReviewKind::Learning {
|
// start from the (re)learning step
|
||||||
return None;
|
if idx > 0 {
|
||||||
|
entries.drain(..idx);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// no revlog entries
|
// we ignore cards that don't have any learning steps
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Keep only the first review when multiple reviews done on one day
|
// Filter out unwanted entries
|
||||||
let mut unique_dates = std::collections::HashSet::new();
|
let mut unique_dates = std::collections::HashSet::new();
|
||||||
entries.retain(|entry| unique_dates.insert(entry.days_elapsed(next_day_at)));
|
entries.retain(|entry| {
|
||||||
|
let manually_rescheduled =
|
||||||
|
entry.review_kind == RevlogReviewKind::Manual || entry.button_chosen == 0;
|
||||||
|
let cram = entry.review_kind == RevlogReviewKind::Filtered && entry.ease_factor == 0;
|
||||||
|
if manually_rescheduled || cram {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Keep only the first review when multiple reviews done on one day
|
||||||
|
unique_dates.insert(entry.days_elapsed(next_day_at))
|
||||||
|
});
|
||||||
|
|
||||||
// Old versions of Anki did not record Manual entries in the review log when
|
// Old versions of Anki did not record Manual entries in the review log when
|
||||||
// cards were manually rescheduled. So we look for times when the card has
|
// cards were manually rescheduled. So we look for times when the card has
|
||||||
|
@ -153,13 +194,13 @@ fn single_card_revlog_to_items(
|
||||||
}))
|
}))
|
||||||
.collect_vec();
|
.collect_vec();
|
||||||
|
|
||||||
// Skip the first learning step, then convert the remaining entries into
|
let skip = if training { 1 } else { 0 };
|
||||||
// separate FSRSItems, where each item contains all reviews done until then.
|
// Convert the remaining entries into separate FSRSItems, where each item
|
||||||
Some(
|
// contains all reviews done until then.
|
||||||
entries
|
let items = entries
|
||||||
.iter()
|
.iter()
|
||||||
.enumerate()
|
.enumerate()
|
||||||
.skip(1)
|
.skip(skip)
|
||||||
.map(|(outer_idx, _)| {
|
.map(|(outer_idx, _)| {
|
||||||
let reviews = entries
|
let reviews = entries
|
||||||
.iter()
|
.iter()
|
||||||
|
@ -172,8 +213,12 @@ fn single_card_revlog_to_items(
|
||||||
.collect();
|
.collect();
|
||||||
FSRSItem { reviews }
|
FSRSItem { reviews }
|
||||||
})
|
})
|
||||||
.collect(),
|
.collect_vec();
|
||||||
)
|
if items.is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(items)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RevlogEntry {
|
impl RevlogEntry {
|
||||||
|
@ -192,94 +237,136 @@ mod tests {
|
||||||
RevlogEntry {
|
RevlogEntry {
|
||||||
review_kind,
|
review_kind,
|
||||||
id: ((NEXT_DAY_AT.0 - days_ago * 86400) * 1000).into(),
|
id: ((NEXT_DAY_AT.0 - days_ago * 86400) * 1000).into(),
|
||||||
|
button_chosen: 3,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn review(delta_t: i32) -> FSRSReview {
|
||||||
|
FSRSReview { rating: 3, delta_t }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn convert(revlog: &[RevlogEntry], training: bool) -> Option<Vec<FSRSItem>> {
|
||||||
|
single_card_revlog_to_items(revlog.to_vec(), NEXT_DAY_AT, training)
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! fsrs_items {
|
||||||
|
($($reviews:expr),*) => {
|
||||||
|
Some(vec![
|
||||||
|
$(
|
||||||
|
FSRSItem {
|
||||||
|
reviews: $reviews.to_vec()
|
||||||
|
}
|
||||||
|
),*
|
||||||
|
])
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn delta_t_is_correct() -> Result<()> {
|
fn delta_t_is_correct() -> Result<()> {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
single_card_revlog_to_items(
|
convert(
|
||||||
vec![
|
&[
|
||||||
revlog(RevlogReviewKind::Learning, 1),
|
revlog(RevlogReviewKind::Learning, 1),
|
||||||
revlog(RevlogReviewKind::Review, 0)
|
revlog(RevlogReviewKind::Review, 0)
|
||||||
],
|
],
|
||||||
NEXT_DAY_AT
|
true,
|
||||||
),
|
),
|
||||||
Some(vec![FSRSItem {
|
fsrs_items!([review(0), review(1)])
|
||||||
reviews: vec![
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 0
|
|
||||||
},
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 1
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}])
|
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
single_card_revlog_to_items(
|
convert(
|
||||||
vec![
|
&[
|
||||||
revlog(RevlogReviewKind::Learning, 15),
|
revlog(RevlogReviewKind::Learning, 15),
|
||||||
revlog(RevlogReviewKind::Learning, 13),
|
revlog(RevlogReviewKind::Learning, 13),
|
||||||
revlog(RevlogReviewKind::Review, 10),
|
revlog(RevlogReviewKind::Review, 10),
|
||||||
revlog(RevlogReviewKind::Review, 5)
|
revlog(RevlogReviewKind::Review, 5)
|
||||||
],
|
],
|
||||||
NEXT_DAY_AT,
|
true,
|
||||||
),
|
),
|
||||||
Some(vec![
|
fsrs_items!(
|
||||||
FSRSItem {
|
[review(0), review(2)],
|
||||||
reviews: vec![
|
[review(0), review(2), review(3)],
|
||||||
FSRSReview {
|
[review(0), review(2), review(3), review(5)]
|
||||||
rating: 0,
|
)
|
||||||
delta_t: 0
|
);
|
||||||
},
|
assert_eq!(
|
||||||
FSRSReview {
|
convert(
|
||||||
rating: 0,
|
&[
|
||||||
delta_t: 2
|
revlog(RevlogReviewKind::Learning, 15),
|
||||||
}
|
revlog(RevlogReviewKind::Learning, 13),
|
||||||
]
|
],
|
||||||
},
|
true,
|
||||||
FSRSItem {
|
),
|
||||||
reviews: vec![
|
fsrs_items!([review(0), review(2),])
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 0
|
|
||||||
},
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 2
|
|
||||||
},
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 3
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
FSRSItem {
|
|
||||||
reviews: vec![
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 0
|
|
||||||
},
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 2
|
|
||||||
},
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 3
|
|
||||||
},
|
|
||||||
FSRSReview {
|
|
||||||
rating: 0,
|
|
||||||
delta_t: 5
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
])
|
|
||||||
);
|
);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
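To make the expected values above easy to verify: delta_t is the gap in whole days between consecutive kept reviews, so entries logged 15, 13, 10 and 5 days before NEXT_DAY_AT sit at elapsed days 0, 2, 5 and 10, giving delta_t values 0, 2, 3 and 5. A self-contained restatement of that arithmetic:

// delta_t[i] = days_elapsed[i] - days_elapsed[i - 1], with delta_t[0] = 0.
let days_ago = [15i64, 13, 10, 5];
let elapsed: Vec<i64> = days_ago.iter().map(|d| days_ago[0] - d).collect();
let delta_t: Vec<i64> = elapsed
    .iter()
    .enumerate()
    .map(|(i, e)| if i == 0 { 0 } else { e - elapsed[i - 1] })
    .collect();
assert_eq!(delta_t, vec![0, 2, 3, 5]);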
|
|
||||||
|
#[test]
|
||||||
|
fn cram_is_filtered() {
|
||||||
|
assert_eq!(
|
||||||
|
convert(
|
||||||
|
&[
|
||||||
|
revlog(RevlogReviewKind::Learning, 10),
|
||||||
|
revlog(RevlogReviewKind::Review, 9),
|
||||||
|
revlog(RevlogReviewKind::Filtered, 7),
|
||||||
|
revlog(RevlogReviewKind::Review, 4),
|
||||||
|
],
|
||||||
|
true,
|
||||||
|
),
|
||||||
|
fsrs_items!([review(0), review(1)], [review(0), review(1), review(5)])
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn set_due_date_is_filtered() {
|
||||||
|
assert_eq!(
|
||||||
|
convert(
|
||||||
|
&[
|
||||||
|
revlog(RevlogReviewKind::Learning, 10),
|
||||||
|
revlog(RevlogReviewKind::Review, 9),
|
||||||
|
RevlogEntry {
|
||||||
|
ease_factor: 100,
|
||||||
|
..revlog(RevlogReviewKind::Manual, 7)
|
||||||
|
},
|
||||||
|
revlog(RevlogReviewKind::Review, 4),
|
||||||
|
],
|
||||||
|
true,
|
||||||
|
),
|
||||||
|
fsrs_items!([review(0), review(1)], [review(0), review(1), review(5)])
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn card_reset_drops_all_previous_history() {
|
||||||
|
assert_eq!(
|
||||||
|
convert(
|
||||||
|
&[
|
||||||
|
revlog(RevlogReviewKind::Learning, 10),
|
||||||
|
revlog(RevlogReviewKind::Review, 9),
|
||||||
|
RevlogEntry {
|
||||||
|
ease_factor: 0,
|
||||||
|
..revlog(RevlogReviewKind::Manual, 7)
|
||||||
|
},
|
||||||
|
revlog(RevlogReviewKind::Learning, 4),
|
||||||
|
revlog(RevlogReviewKind::Review, 0),
|
||||||
|
],
|
||||||
|
true,
|
||||||
|
),
|
||||||
|
fsrs_items!([review(0), review(4)])
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn single_learning_step_skipped_when_training() {
|
||||||
|
assert_eq!(
|
||||||
|
convert(&[revlog(RevlogReviewKind::Learning, 1),], true),
|
||||||
|
None,
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
convert(&[revlog(RevlogReviewKind::Learning, 1),], false),
|
||||||
|
fsrs_items!([review(0)])
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -57,6 +57,7 @@ impl Card {
|
||||||
self.reps = 0;
|
self.reps = 0;
|
||||||
self.lapses = 0;
|
self.lapses = 0;
|
||||||
}
|
}
|
||||||
|
self.fsrs_memory_state = None;
|
||||||
|
|
||||||
last_position.is_none()
|
last_position.is_none()
|
||||||
}
|
}
|
||||||
|
|
|
@ -244,9 +244,7 @@ impl crate::services::SchedulerService for Collection {
|
||||||
&mut self,
|
&mut self,
|
||||||
input: scheduler::ComputeFsrsWeightsRequest,
|
input: scheduler::ComputeFsrsWeightsRequest,
|
||||||
) -> Result<scheduler::ComputeFsrsWeightsResponse> {
|
) -> Result<scheduler::ComputeFsrsWeightsResponse> {
|
||||||
Ok(scheduler::ComputeFsrsWeightsResponse {
|
self.compute_weights(&input.search)
|
||||||
weights: self.compute_weights(&input.search)?,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn compute_optimal_retention(
|
fn compute_optimal_retention(
|
||||||
|
@ -264,8 +262,8 @@ impl crate::services::SchedulerService for Collection {
|
||||||
) -> Result<scheduler::EvaluateWeightsResponse> {
|
) -> Result<scheduler::EvaluateWeightsResponse> {
|
||||||
let ret = self.evaluate_weights(&input.weights, &input.search)?;
|
let ret = self.evaluate_weights(&input.weights, &input.search)?;
|
||||||
Ok(scheduler::EvaluateWeightsResponse {
|
Ok(scheduler::EvaluateWeightsResponse {
|
||||||
log_loss: ret.0,
|
log_loss: ret.log_loss,
|
||||||
rmse: ret.1,
|
rmse_bins: ret.rmse_bins,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,6 +8,7 @@ impl From<anki_proto::scheduler::scheduling_state::Learning> for LearnState {
|
||||||
LearnState {
|
LearnState {
|
||||||
remaining_steps: state.remaining_steps,
|
remaining_steps: state.remaining_steps,
|
||||||
scheduled_secs: state.scheduled_secs,
|
scheduled_secs: state.scheduled_secs,
|
||||||
|
fsrs_memory_state: state.fsrs_memory_state.map(Into::into),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -17,6 +18,7 @@ impl From<LearnState> for anki_proto::scheduler::scheduling_state::Learning {
|
||||||
anki_proto::scheduler::scheduling_state::Learning {
|
anki_proto::scheduler::scheduling_state::Learning {
|
||||||
remaining_steps: state.remaining_steps,
|
remaining_steps: state.remaining_steps,
|
||||||
scheduled_secs: state.scheduled_secs,
|
scheduled_secs: state.scheduled_secs,
|
||||||
|
fsrs_memory_state: state.fsrs_memory_state.map(Into::into),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,6 +11,7 @@ impl From<anki_proto::scheduler::scheduling_state::Review> for ReviewState {
|
||||||
ease_factor: state.ease_factor,
|
ease_factor: state.ease_factor,
|
||||||
lapses: state.lapses,
|
lapses: state.lapses,
|
||||||
leeched: state.leeched,
|
leeched: state.leeched,
|
||||||
|
fsrs_memory_state: state.fsrs_memory_state.map(Into::into),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -23,6 +24,7 @@ impl From<ReviewState> for anki_proto::scheduler::scheduling_state::Review {
|
||||||
ease_factor: state.ease_factor,
|
ease_factor: state.ease_factor,
|
||||||
lapses: state.lapses,
|
lapses: state.lapses,
|
||||||
leeched: state.leeched,
|
leeched: state.leeched,
|
||||||
|
fsrs_memory_state: state.fsrs_memory_state.map(Into::into),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,16 +40,6 @@ impl<'a> StateContext<'a> {
|
||||||
(interval.round() as u32).clamp(minimum, maximum)
|
(interval.round() as u32).clamp(minimum, maximum)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn fuzzed_graduating_interval_good(&self) -> u32 {
|
|
||||||
let (minimum, maximum) = self.min_and_max_review_intervals(1);
|
|
||||||
self.with_review_fuzz(self.graduating_interval_good as f32, minimum, maximum)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn fuzzed_graduating_interval_easy(&self) -> u32 {
|
|
||||||
let (minimum, maximum) = self.min_and_max_review_intervals(1);
|
|
||||||
self.with_review_fuzz(self.graduating_interval_easy as f32, minimum, maximum)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the bounds of the fuzz range, respecting `minimum` and `maximum`.
|
/// Return the bounds of the fuzz range, respecting `minimum` and `maximum`.
|
||||||
|
|
|
@ -6,12 +6,14 @@ use super::CardState;
|
||||||
use super::ReviewState;
|
use super::ReviewState;
|
||||||
use super::SchedulingStates;
|
use super::SchedulingStates;
|
||||||
use super::StateContext;
|
use super::StateContext;
|
||||||
|
use crate::card::FsrsMemoryState;
|
||||||
use crate::revlog::RevlogReviewKind;
|
use crate::revlog::RevlogReviewKind;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||||
pub struct LearnState {
|
pub struct LearnState {
|
||||||
pub remaining_steps: u32,
|
pub remaining_steps: u32,
|
||||||
pub scheduled_secs: u32,
|
pub scheduled_secs: u32,
|
||||||
|
pub fsrs_memory_state: Option<FsrsMemoryState>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LearnState {
|
impl LearnState {
|
||||||
|
@ -27,7 +29,7 @@ impl LearnState {
|
||||||
SchedulingStates {
|
SchedulingStates {
|
||||||
current: self.into(),
|
current: self.into(),
|
||||||
again: self.answer_again(ctx).into(),
|
again: self.answer_again(ctx).into(),
|
||||||
hard: self.answer_hard(ctx),
|
hard: self.answer_hard(ctx).into(),
|
||||||
good: self.answer_good(ctx),
|
good: self.answer_good(ctx),
|
||||||
easy: self.answer_easy(ctx).into(),
|
easy: self.answer_easy(ctx).into(),
|
||||||
}
|
}
|
||||||
|
@ -37,38 +39,42 @@ impl LearnState {
|
||||||
LearnState {
|
LearnState {
|
||||||
remaining_steps: ctx.steps.remaining_for_failed(),
|
remaining_steps: ctx.steps.remaining_for_failed(),
|
||||||
scheduled_secs: ctx.steps.again_delay_secs_learn(),
|
scheduled_secs: ctx.steps.again_delay_secs_learn(),
|
||||||
|
fsrs_memory_state: ctx.fsrs_next_states.as_ref().map(|s| s.again.memory.into()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn answer_hard(self, ctx: &StateContext) -> CardState {
|
fn answer_hard(self, ctx: &StateContext) -> LearnState {
|
||||||
if let Some(hard_delay) = ctx.steps.hard_delay_secs(self.remaining_steps) {
|
|
||||||
LearnState {
|
LearnState {
|
||||||
scheduled_secs: hard_delay,
|
scheduled_secs: ctx
|
||||||
|
.steps
|
||||||
|
.hard_delay_secs(self.remaining_steps)
|
||||||
|
// user has 0 learning steps, which the UI doesn't allow
|
||||||
|
.unwrap_or(60),
|
||||||
|
fsrs_memory_state: ctx.fsrs_next_states.as_ref().map(|s| s.hard.memory.into()),
|
||||||
..self
|
..self
|
||||||
}
|
}
|
||||||
.into()
|
|
||||||
} else {
|
|
||||||
// steps modified while card in learning
|
|
||||||
ReviewState {
|
|
||||||
scheduled_days: ctx.fuzzed_graduating_interval_good(),
|
|
||||||
ease_factor: ctx.initial_ease_factor,
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
.into()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn answer_good(self, ctx: &StateContext) -> CardState {
|
fn answer_good(self, ctx: &StateContext) -> CardState {
|
||||||
|
let fsrs_memory_state = ctx.fsrs_next_states.as_ref().map(|s| s.good.memory.into());
|
||||||
if let Some(good_delay) = ctx.steps.good_delay_secs(self.remaining_steps) {
|
if let Some(good_delay) = ctx.steps.good_delay_secs(self.remaining_steps) {
|
||||||
LearnState {
|
LearnState {
|
||||||
remaining_steps: ctx.steps.remaining_for_good(self.remaining_steps),
|
remaining_steps: ctx.steps.remaining_for_good(self.remaining_steps),
|
||||||
scheduled_secs: good_delay,
|
scheduled_secs: good_delay,
|
||||||
|
fsrs_memory_state,
|
||||||
}
|
}
|
||||||
.into()
|
.into()
|
||||||
} else {
|
} else {
|
||||||
|
let (minimum, maximum) = ctx.min_and_max_review_intervals(1);
|
||||||
|
let interval = if let Some(states) = &ctx.fsrs_next_states {
|
||||||
|
states.good.interval
|
||||||
|
} else {
|
||||||
|
ctx.graduating_interval_good
|
||||||
|
};
|
||||||
ReviewState {
|
ReviewState {
|
||||||
scheduled_days: ctx.fuzzed_graduating_interval_good(),
|
scheduled_days: ctx.with_review_fuzz(interval as f32, minimum, maximum),
|
||||||
ease_factor: ctx.initial_ease_factor,
|
ease_factor: ctx.initial_ease_factor,
|
||||||
|
fsrs_memory_state,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
}
|
}
|
||||||
.into()
|
.into()
|
||||||
|
@ -76,9 +82,17 @@ impl LearnState {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn answer_easy(self, ctx: &StateContext) -> ReviewState {
|
fn answer_easy(self, ctx: &StateContext) -> ReviewState {
|
||||||
|
let (mut minimum, maximum) = ctx.min_and_max_review_intervals(1);
|
||||||
|
let interval = if let Some(states) = &ctx.fsrs_next_states {
|
||||||
|
minimum = states.good.interval + 1;
|
||||||
|
states.easy.interval
|
||||||
|
} else {
|
||||||
|
ctx.graduating_interval_easy
|
||||||
|
};
|
||||||
ReviewState {
|
ReviewState {
|
||||||
scheduled_days: ctx.fuzzed_graduating_interval_easy(),
|
scheduled_days: ctx.with_review_fuzz(interval as f32, minimum, maximum),
|
||||||
ease_factor: ctx.initial_ease_factor,
|
ease_factor: ctx.initial_ease_factor,
|
||||||
|
fsrs_memory_state: ctx.fsrs_next_states.as_ref().map(|s| s.easy.memory.into()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
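When FSRS supplies the intervals, the code above raises the lower bound for Easy to one day past the raw FSRS Good interval, so the fuzzed Easy interval is kept at least one day above it; without FSRS the preset's graduating intervals are used as before. A small sketch of the effect, reusing only names from this hunk (the concrete numbers are illustrative):

// Suppose FSRS proposes good = 4 days and easy = 5 days.
let (mut minimum, maximum) = ctx.min_and_max_review_intervals(1);
if let Some(states) = &ctx.fsrs_next_states {
    minimum = states.good.interval + 1; // Easy lands after the raw Good interval
}
// with_review_fuzz() then clamps the fuzzed Easy interval into [minimum, maximum].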
|
|
|
@ -14,6 +14,7 @@ pub(crate) mod review;
|
||||||
pub(crate) mod steps;
|
pub(crate) mod steps;
|
||||||
|
|
||||||
pub use filtered::FilteredState;
|
pub use filtered::FilteredState;
|
||||||
|
use fsrs::NextStates;
|
||||||
pub(crate) use interval_kind::IntervalKind;
|
pub(crate) use interval_kind::IntervalKind;
|
||||||
pub use learning::LearnState;
|
pub use learning::LearnState;
|
||||||
pub use new::NewState;
|
pub use new::NewState;
|
||||||
|
@ -83,6 +84,7 @@ pub(crate) struct StateContext<'a> {
|
||||||
/// In range `0.0..1.0`. Used to pick the final interval from the fuzz
|
/// In range `0.0..1.0`. Used to pick the final interval from the fuzz
|
||||||
/// range.
|
/// range.
|
||||||
pub fuzz_factor: Option<f32>,
|
pub fuzz_factor: Option<f32>,
|
||||||
|
pub fsrs_next_states: Option<NextStates>,
|
||||||
|
|
||||||
// learning
|
// learning
|
||||||
pub steps: LearningSteps<'a>,
|
pub steps: LearningSteps<'a>,
|
||||||
|
@ -135,6 +137,7 @@ impl<'a> StateContext<'a> {
|
||||||
minimum_lapse_interval: 1,
|
minimum_lapse_interval: 1,
|
||||||
in_filtered_deck: false,
|
in_filtered_deck: false,
|
||||||
preview_step: 10,
|
preview_step: 10,
|
||||||
|
fsrs_next_states: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -44,6 +44,7 @@ impl NormalState {
|
||||||
let next_states = LearnState {
|
let next_states = LearnState {
|
||||||
remaining_steps: ctx.steps.remaining_for_failed(),
|
remaining_steps: ctx.steps.remaining_for_failed(),
|
||||||
scheduled_secs: 0,
|
scheduled_secs: 0,
|
||||||
|
fsrs_memory_state: None,
|
||||||
}
|
}
|
||||||
.next_states(ctx);
|
.next_states(ctx);
|
||||||
// .. but with current as New, not Learning
|
// .. but with current as New, not Learning
|
||||||
|
|
|
@ -30,20 +30,23 @@ impl RelearnState {
|
||||||
again: self.answer_again(ctx),
|
again: self.answer_again(ctx),
|
||||||
hard: self.answer_hard(ctx),
|
hard: self.answer_hard(ctx),
|
||||||
good: self.answer_good(ctx),
|
good: self.answer_good(ctx),
|
||||||
easy: self.answer_easy().into(),
|
easy: self.answer_easy(ctx).into(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn answer_again(self, ctx: &StateContext) -> CardState {
|
fn answer_again(self, ctx: &StateContext) -> CardState {
|
||||||
|
let (scheduled_days, fsrs_memory_state) = self.review.failing_review_interval(ctx);
|
||||||
if let Some(again_delay) = ctx.relearn_steps.again_delay_secs_relearn() {
|
if let Some(again_delay) = ctx.relearn_steps.again_delay_secs_relearn() {
|
||||||
RelearnState {
|
RelearnState {
|
||||||
learning: LearnState {
|
learning: LearnState {
|
||||||
remaining_steps: ctx.relearn_steps.remaining_for_failed(),
|
remaining_steps: ctx.relearn_steps.remaining_for_failed(),
|
||||||
scheduled_secs: again_delay,
|
scheduled_secs: again_delay,
|
||||||
|
fsrs_memory_state,
|
||||||
},
|
},
|
||||||
review: ReviewState {
|
review: ReviewState {
|
||||||
scheduled_days: self.review.failing_review_interval(ctx),
|
scheduled_days,
|
||||||
elapsed_days: 0,
|
elapsed_days: 0,
|
||||||
|
fsrs_memory_state,
|
||||||
..self.review
|
..self.review
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -54,6 +57,7 @@ impl RelearnState {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn answer_hard(self, ctx: &StateContext) -> CardState {
|
fn answer_hard(self, ctx: &StateContext) -> CardState {
|
||||||
|
let fsrs_memory_state = ctx.fsrs_next_states.as_ref().map(|s| s.hard.memory.into());
|
||||||
if let Some(hard_delay) = ctx
|
if let Some(hard_delay) = ctx
|
||||||
.relearn_steps
|
.relearn_steps
|
||||||
.hard_delay_secs(self.learning.remaining_steps)
|
.hard_delay_secs(self.learning.remaining_steps)
|
||||||
|
@ -61,10 +65,12 @@ impl RelearnState {
|
||||||
RelearnState {
|
RelearnState {
|
||||||
learning: LearnState {
|
learning: LearnState {
|
||||||
scheduled_secs: hard_delay,
|
scheduled_secs: hard_delay,
|
||||||
|
fsrs_memory_state,
|
||||||
..self.learning
|
..self.learning
|
||||||
},
|
},
|
||||||
review: ReviewState {
|
review: ReviewState {
|
||||||
elapsed_days: 0,
|
elapsed_days: 0,
|
||||||
|
fsrs_memory_state,
|
||||||
..self.review
|
..self.review
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -75,6 +81,7 @@ impl RelearnState {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn answer_good(self, ctx: &StateContext) -> CardState {
|
fn answer_good(self, ctx: &StateContext) -> CardState {
|
||||||
|
let fsrs_memory_state = ctx.fsrs_next_states.as_ref().map(|s| s.good.memory.into());
|
||||||
if let Some(good_delay) = ctx
|
if let Some(good_delay) = ctx
|
||||||
.relearn_steps
|
.relearn_steps
|
||||||
.good_delay_secs(self.learning.remaining_steps)
|
.good_delay_secs(self.learning.remaining_steps)
|
||||||
|
@ -85,9 +92,11 @@ impl RelearnState {
|
||||||
remaining_steps: ctx
|
remaining_steps: ctx
|
||||||
.relearn_steps
|
.relearn_steps
|
||||||
.remaining_for_good(self.learning.remaining_steps),
|
.remaining_for_good(self.learning.remaining_steps),
|
||||||
|
fsrs_memory_state,
|
||||||
},
|
},
|
||||||
review: ReviewState {
|
review: ReviewState {
|
||||||
elapsed_days: 0,
|
elapsed_days: 0,
|
||||||
|
fsrs_memory_state,
|
||||||
..self.review
|
..self.review
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -97,10 +106,11 @@ impl RelearnState {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn answer_easy(self) -> ReviewState {
|
fn answer_easy(self, ctx: &StateContext) -> ReviewState {
|
||||||
ReviewState {
|
ReviewState {
|
||||||
scheduled_days: self.review.scheduled_days + 1,
|
scheduled_days: self.review.scheduled_days + 1,
|
||||||
elapsed_days: 0,
|
elapsed_days: 0,
|
||||||
|
fsrs_memory_state: ctx.fsrs_next_states.as_ref().map(|s| s.easy.memory.into()),
|
||||||
..self.review
|
..self.review
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,12 +1,15 @@
 // Copyright: Ankitects Pty Ltd and contributors
 // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

+use fsrs::NextStates;

 use super::interval_kind::IntervalKind;
 use super::CardState;
 use super::LearnState;
 use super::RelearnState;
 use super::SchedulingStates;
 use super::StateContext;
+use crate::card::FsrsMemoryState;
 use crate::revlog::RevlogReviewKind;

 pub const INITIAL_EASE_FACTOR: f32 = 2.5;
@@ -22,6 +25,7 @@ pub struct ReviewState {
 pub ease_factor: f32,
 pub lapses: u32,
 pub leeched: bool,
+pub fsrs_memory_state: Option<FsrsMemoryState>,
 }

 impl Default for ReviewState {
@@ -32,6 +36,7 @@ impl Default for ReviewState {
 ease_factor: INITIAL_EASE_FACTOR,
 lapses: 0,
 leeched: false,
+fsrs_memory_state: None,
 }
 }
 }
@@ -61,27 +66,37 @@ impl ReviewState {
 SchedulingStates {
 current: self.into(),
 again: self.answer_again(ctx),
-hard: self.answer_hard(hard_interval).into(),
+hard: self.answer_hard(hard_interval, ctx).into(),
-good: self.answer_good(good_interval).into(),
+good: self.answer_good(good_interval, ctx).into(),
-easy: self.answer_easy(easy_interval).into(),
+easy: self.answer_easy(easy_interval, ctx).into(),
 }
 }

-pub(crate) fn failing_review_interval(self, ctx: &StateContext) -> u32 {
+pub(crate) fn failing_review_interval(
-(((self.scheduled_days as f32) * ctx.lapse_multiplier) as u32)
+self,
+ctx: &StateContext,
+) -> (u32, Option<FsrsMemoryState>) {
+if let Some(states) = &ctx.fsrs_next_states {
+(states.again.interval, Some(states.again.memory.into()))
+} else {
+let interval = (((self.scheduled_days as f32) * ctx.lapse_multiplier) as u32)
 .max(ctx.minimum_lapse_interval)
-.max(1)
+.max(1);
+(interval, None)
+}
 }

 fn answer_again(self, ctx: &StateContext) -> CardState {
 let lapses = self.lapses + 1;
 let leeched = leech_threshold_met(lapses, ctx.leech_threshold);
+let (scheduled_days, fsrs_memory_state) = self.failing_review_interval(ctx);
 let again_review = ReviewState {
-scheduled_days: self.failing_review_interval(ctx),
+scheduled_days,
 elapsed_days: 0,
 ease_factor: (self.ease_factor + EASE_FACTOR_AGAIN_DELTA).max(MINIMUM_EASE_FACTOR),
 lapses,
 leeched,
+fsrs_memory_state,
 };

 if let Some(again_delay) = ctx.relearn_steps.again_delay_secs_relearn() {
@@ -89,6 +104,7 @@ impl ReviewState {
 learning: LearnState {
 remaining_steps: ctx.relearn_steps.remaining_for_failed(),
 scheduled_secs: again_delay,
+fsrs_memory_state,
 },
 review: again_review,
 }
@@ -98,28 +114,31 @@ impl ReviewState {
 }
 }

-fn answer_hard(self, scheduled_days: u32) -> ReviewState {
+fn answer_hard(self, scheduled_days: u32, ctx: &StateContext) -> ReviewState {
 ReviewState {
 scheduled_days,
 elapsed_days: 0,
 ease_factor: (self.ease_factor + EASE_FACTOR_HARD_DELTA).max(MINIMUM_EASE_FACTOR),
+fsrs_memory_state: ctx.fsrs_next_states.as_ref().map(|s| s.hard.memory.into()),
 ..self
 }
 }

-fn answer_good(self, scheduled_days: u32) -> ReviewState {
+fn answer_good(self, scheduled_days: u32, ctx: &StateContext) -> ReviewState {
 ReviewState {
 scheduled_days,
 elapsed_days: 0,
+fsrs_memory_state: ctx.fsrs_next_states.as_ref().map(|s| s.good.memory.into()),
 ..self
 }
 }

-fn answer_easy(self, scheduled_days: u32) -> ReviewState {
+fn answer_easy(self, scheduled_days: u32, ctx: &StateContext) -> ReviewState {
 ReviewState {
 scheduled_days,
 elapsed_days: 0,
 ease_factor: self.ease_factor + EASE_FACTOR_EASY_DELTA,
+fsrs_memory_state: ctx.fsrs_next_states.as_ref().map(|s| s.easy.memory.into()),
 ..self
 }
 }
@@ -127,13 +146,26 @@ impl ReviewState {
 /// Return the intervals for hard, good and easy, each of which depends on
 /// the previous.
 fn passing_review_intervals(self, ctx: &StateContext) -> (u32, u32, u32) {
-if self.days_late() < 0 {
+if let Some(states) = &ctx.fsrs_next_states {
+self.passing_fsrs_review_intervals(ctx, states)
+} else if self.days_late() < 0 {
 self.passing_early_review_intervals(ctx)
 } else {
 self.passing_nonearly_review_intervals(ctx)
 }
 }

+fn passing_fsrs_review_intervals(
+self,
+ctx: &StateContext,
+states: &NextStates,
+) -> (u32, u32, u32) {
+let hard = constrain_passing_interval(ctx, states.hard.interval as f32, 1, true);
+let good = constrain_passing_interval(ctx, states.good.interval as f32, hard + 1, true);
+let easy = constrain_passing_interval(ctx, states.easy.interval as f32, good + 1, true);
+(hard, good, easy)
+}

 fn passing_nonearly_review_intervals(self, ctx: &StateContext) -> (u32, u32, u32) {
 let current_interval = self.scheduled_days as f32;
 let days_late = self.days_late().max(0) as f32;
@@ -219,12 +251,16 @@ fn leech_threshold_met(lapses: u32, threshold: u32) -> bool {
 }

 /// Transform the provided hard/good/easy interval.
-/// - Apply configured interval multiplier.
+/// - Apply configured interval multiplier if not FSRS.
 /// - Apply fuzz.
 /// - Ensure it is at least `minimum`, and at least 1.
 /// - Ensure it is at or below the configured maximum interval.
 fn constrain_passing_interval(ctx: &StateContext, interval: f32, minimum: u32, fuzz: bool) -> u32 {
-let interval = interval * ctx.interval_multiplier;
+let interval = if ctx.fsrs_next_states.is_some() {
+interval
+} else {
+interval * ctx.interval_multiplier
+};
 let (minimum, maximum) = ctx.min_and_max_review_intervals(minimum);
 if fuzz {
 ctx.with_review_fuzz(interval, minimum, maximum)
@@ -277,6 +313,7 @@ mod test {
 ease_factor: 1.3,
 lapses: 0,
 leeched: false,
+fsrs_memory_state: None,
 };
 ctx.fuzz_factor = Some(0.0);
 assert_eq!(state.passing_review_intervals(&ctx), (2, 3, 4));
@@ -305,6 +342,7 @@ mod test {
 ease_factor: 1.3,
 lapses: 0,
 leeched: false,
+fsrs_memory_state: None,
 };
 ctx.fuzz_factor = Some(0.0);
 assert_eq!(state.passing_review_intervals(&ctx), (1, 3, 4));

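The FSRS branch above routes each passing interval through constrain_passing_interval, using the previous button's result plus one as the floor, so the three answers can never collapse onto the same day. A minimal illustrative sketch of just that ordering rule (not code from the diff; it leaves out the fuzz and maximum-interval handling the real helper also applies):

// Mirrors the hard/good/easy clamping order used by passing_fsrs_review_intervals,
// with fuzz and the configured maximum cap omitted for brevity.
fn ordered_intervals(hard: u32, good: u32, easy: u32) -> (u32, u32, u32) {
    let hard = hard.max(1);
    let good = good.max(hard + 1);
    let easy = easy.max(good + 1);
    (hard, good, easy)
}

fn main() {
    // Even if FSRS proposes identical intervals, the answers stay distinct.
    assert_eq!(ordered_intervals(3, 3, 3), (3, 4, 5));
}
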
@@ -28,6 +28,7 @@ pub use writer::replace_search_node;
 use crate::browser_table::Column;
 use crate::card::CardType;
 use crate::prelude::*;
+use crate::scheduler::timing::SchedTimingToday;

 #[derive(Debug, PartialEq, Eq, Clone, Copy)]
 pub enum ReturnItemType {
@@ -207,7 +208,7 @@ impl Collection {
 SortMode::Builtin { column, reverse } => {
 prepare_sort(self, column, item_type)?;
 sql.push_str(" order by ");
-write_order(sql, item_type, column, reverse)?;
+write_order(sql, item_type, column, reverse, self.timing_today()?)?;
 }
 SortMode::Custom(order_clause) => {
 sql.push_str(" order by ");
@@ -332,9 +333,10 @@ fn write_order(
 item_type: ReturnItemType,
 column: Column,
 reverse: bool,
+timing: SchedTimingToday,
 ) -> Result<()> {
 let order = match item_type {
-ReturnItemType::Cards => card_order_from_sort_column(column),
+ReturnItemType::Cards => card_order_from_sort_column(column, timing),
 ReturnItemType::Notes => note_order_from_sort_column(column),
 };
 require!(!order.is_empty(), "Can't sort {item_type:?} by {column:?}.");
@@ -351,7 +353,7 @@ fn write_order(
 Ok(())
 }

-fn card_order_from_sort_column(column: Column) -> Cow<'static, str> {
+fn card_order_from_sort_column(column: Column, timing: SchedTimingToday) -> Cow<'static, str> {
 match column {
 Column::CardMod => "c.mod asc".into(),
 Column::Cards => concat!(
@@ -372,6 +374,13 @@ fn card_order_from_sort_column(column: Column) -> Cow<'static, str> {
 Column::SortField => "n.sfld collate nocase asc, c.ord asc".into(),
 Column::Tags => "n.tags asc".into(),
 Column::Answer | Column::Custom | Column::Question => "".into(),
+Column::Stability => "extract_fsrs_variable(c.data, 's') desc".into(),
+Column::Difficulty => "extract_fsrs_variable(c.data, 'd') desc".into(),
+Column::Retrievability => format!(
+"extract_fsrs_retrievability(c.data, c.due, c.ivl, {})",
+timing.days_elapsed
+)
+.into(),
 }
 }

@@ -390,7 +399,12 @@ fn note_order_from_sort_column(column: Column) -> Cow<'static, str> {
 Column::Notetype => "(select pos from sort_order where ntid = n.mid) asc".into(),
 Column::SortField => "n.sfld collate nocase asc".into(),
 Column::Tags => "n.tags asc".into(),
-Column::Answer | Column::Custom | Column::Question => "".into(),
+Column::Answer
+| Column::Custom
+| Column::Question
+| Column::Stability
+| Column::Difficulty
+| Column::Retrievability => "".into(),
 }
 }

@@ -105,6 +105,9 @@ pub enum PropertyKind {
 Ease(f32),
 Position(u32),
 Rated(i32, RatingKind),
+Stability(f32),
+Difficulty(f32),
+Retrievability(f32),
 CustomDataNumber { key: String, value: f32 },
 CustomDataString { key: String, value: String },
 }
@@ -410,6 +413,9 @@ fn parse_prop(prop_clause: &str) -> ParseResult<SearchNode> {
 tag("pos"),
 tag("rated"),
 tag("resched"),
+tag("s"),
+tag("d"),
+tag("r"),
 recognize(preceded(tag("cdn:"), alphanumeric1)),
 recognize(preceded(tag("cds:"), alphanumeric1)),
 ))(prop_clause)
@@ -451,6 +457,9 @@ fn parse_prop(prop_clause: &str) -> ParseResult<SearchNode> {
 "reps" => PropertyKind::Reps(parse_u32(num, prop_clause)?),
 "lapses" => PropertyKind::Lapses(parse_u32(num, prop_clause)?),
 "pos" => PropertyKind::Position(parse_u32(num, prop_clause)?),
+"s" => PropertyKind::Stability(parse_f32(num, prop_clause)?),
+"d" => PropertyKind::Difficulty(parse_f32(num, prop_clause)?),
+"r" => PropertyKind::Retrievability(parse_f32(num, prop_clause)?),
 prop if prop.starts_with("cdn:") => PropertyKind::CustomDataNumber {
 key: prop.strip_prefix("cdn:").unwrap().into(),
 value: parse_f32(num, prop_clause)?,

@@ -372,6 +372,21 @@ impl SqlWriter<'_> {
 )
 .unwrap();
 }
+PropertyKind::Stability(s) => {
+write!(self.sql, "extract_fsrs_variable(c.data, 's') {op} {s}").unwrap()
+}
+PropertyKind::Difficulty(d) => {
+let d = d * 9.0 + 1.0;
+write!(self.sql, "extract_fsrs_variable(c.data, 'd') {op} {d}").unwrap()
+}
+PropertyKind::Retrievability(r) => {
+let elap = self.col.timing_today()?.days_elapsed;
+write!(
+self.sql,
+"extract_fsrs_retrievability(c.data, c.due, c.ivl, {elap}) {op} {r}"
+)
+.unwrap()
+}
 }

 Ok(())

@@ -168,6 +168,9 @@ fn write_property(operator: &str, kind: &PropertyKind) -> String {
 Lapses(u) => format!("prop:lapses{}{}", operator, u),
 Ease(f) => format!("prop:ease{}{}", operator, f),
 Position(u) => format!("prop:pos{}{}", operator, u),
+Stability(u) => format!("prop:s{}{}", operator, u),
+Difficulty(u) => format!("prop:d{}{}", operator, u),
+Retrievability(u) => format!("prop:r{}{}", operator, u),
 Rated(u, ease) => match ease {
 RatingKind::AnswerButton(val) => format!("prop:rated{}{}:{}", operator, u, val),
 RatingKind::AnyAnswerButton => format!("prop:rated{}{}", operator, u),

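Together, the three hunks above add the prop:s, prop:d and prop:r searches. Note the difficulty mapping in the SQL writer: the search value is on a 0-1 scale, while the stored FSRS difficulty is on a 1-10 scale. A small sketch of that conversion (illustrative only, not code from the diff):

// prop:d values are scaled onto FSRS's internal 1-10 difficulty range.
fn difficulty_search_to_internal(d: f32) -> f32 {
    d * 9.0 + 1.0
}

fn main() {
    assert_eq!(difficulty_search_to_internal(0.0), 1.0);
    assert_eq!(difficulty_search_to_internal(1.0), 10.0);
    // e.g. prop:d>0.1 compares stored difficulty against roughly 1.9.
    assert!((difficulty_search_to_internal(0.1) - 1.9).abs() < 1e-6);
}
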
@@ -1,6 +1,8 @@
 // Copyright: Ankitects Pty Ltd and contributors
 // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

+use fsrs::FSRS;

 use crate::card::CardQueue;
 use crate::card::CardType;
 use crate::prelude::*;
@@ -24,7 +26,15 @@ impl Collection {

 let (average_secs, total_secs) = average_and_total_secs_strings(&revlog);
 let (due_date, due_position) = self.due_date_and_position(&card)?;
+let timing = self.timing_today()?;
+let fsrs_retrievability = card
+.fsrs_memory_state
+.zip(card.days_since_last_review(&timing))
+.map(|(state, days)| {
+FSRS::new(None)
+.unwrap()
+.current_retrievability(state.into(), days)
+});
 Ok(anki_proto::stats::CardStatsResponse {
 card_id: card.id.into(),
 note_id: card.note_id.into(),
@@ -43,6 +53,8 @@ impl Collection {
 card_type: nt.get_template(card.template_idx)?.name.clone(),
 notetype: nt.name.clone(),
 revlog: revlog.iter().rev().map(stats_revlog_entry).collect(),
+fsrs_memory_state: card.fsrs_memory_state.map(Into::into),
+fsrs_retrievability,
 custom_data: card.custom_data,
 })
 }

@@ -7,16 +7,31 @@ use crate::card::CardType;
 use crate::stats::graphs::GraphsContext;

 impl GraphsContext {
-pub(super) fn eases(&self) -> Eases {
+/// (SM-2, FSRS)
-let mut data = Eases::default();
+pub(super) fn eases(&self) -> (Eases, Eases) {
+let mut eases = Eases::default();
+let mut difficulty = Eases::default();
 for card in &self.cards {
-if matches!(card.ctype, CardType::Review | CardType::Relearn) {
+if let Some(state) = card.fsrs_memory_state {
-*data
+*difficulty
+.eases
+.entry(round_to_nearest_five(
+(state.difficulty - 1.0) / 9.0 * 100.0,
+))
+.or_insert_with(Default::default) += 1;
+} else if matches!(card.ctype, CardType::Review | CardType::Relearn) {
+*eases
 .eases
 .entry((card.ease_factor / 10) as u32)
 .or_insert_with(Default::default) += 1;
 }
 }
-data
+(eases, difficulty)
 }
 }

+pub(super) fn round_to_nearest_five(x: f32) -> u32 {
+let scaled = x * 10.0;
+let rounded = (scaled / 5.0).round() * 5.0;
+(rounded / 10.0) as u32
+}

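The difficulty graph above uses the same 1-10 to percentage mapping that the card info screen applies later in this commit. A worked example of that scaling (illustrative, not code from the diff):

// FSRS difficulty is stored on a 1-10 scale and displayed as a percentage.
fn difficulty_percent(difficulty: f32) -> f32 {
    (difficulty - 1.0) / 9.0 * 100.0
}

fn main() {
    assert_eq!(difficulty_percent(1.0), 0.0);    // easiest possible card
    assert_eq!(difficulty_percent(10.0), 100.0); // hardest possible card
    assert_eq!(difficulty_percent(5.5), 50.0);
}

The resulting percentage is then bucketed with round_to_nearest_five before being counted into the histogram.
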
@@ -8,6 +8,7 @@ mod eases;
 mod future_due;
 mod hours;
 mod intervals;
+mod retrievability;
 mod reviews;
 mod today;

@@ -60,17 +61,20 @@ impl Collection {
 next_day_start: timing.next_day_at,
 local_offset_secs,
 };
+let (eases, difficulty) = ctx.eases();
 let resp = anki_proto::stats::GraphsResponse {
 added: Some(ctx.added_days()),
 reviews: Some(ctx.review_counts_and_times()),
 future_due: Some(ctx.future_due()),
 intervals: Some(ctx.intervals()),
-eases: Some(ctx.eases()),
+eases: Some(eases),
+difficulty: Some(difficulty),
 today: Some(ctx.today()),
 hours: Some(ctx.hours()),
 buttons: Some(ctx.buttons()),
 card_counts: Some(ctx.card_counts()),
 rollover_hour: self.rollover_for_current_scheduler()? as u32,
+retrievability: Some(ctx.retrievability()),
 };
 Ok(resp)
 }

rslib/src/stats/graphs/retrievability.rs (new file, +35)
@@ -0,0 +1,35 @@
+// Copyright: Ankitects Pty Ltd and contributors
+// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+
+use anki_proto::stats::graphs_response::Retrievability;
+use fsrs::FSRS;
+
+use crate::scheduler::timing::SchedTimingToday;
+use crate::stats::graphs::eases::round_to_nearest_five;
+use crate::stats::graphs::GraphsContext;
+
+impl GraphsContext {
+/// (SM-2, FSRS)
+pub(super) fn retrievability(&self) -> Retrievability {
+let mut retrievability = Retrievability::default();
+let timing = SchedTimingToday {
+days_elapsed: self.days_elapsed,
+now: Default::default(),
+next_day_at: Default::default(),
+};
+let fsrs = FSRS::new(None).unwrap();
+for card in &self.cards {
+if let Some(state) = card.fsrs_memory_state {
+let r = fsrs.current_retrievability(
+state.into(),
+card.days_since_last_review(&timing).unwrap_or_default(),
+);
+*retrievability
+.retrievability
+.entry(round_to_nearest_five(r * 100.0))
+.or_insert_with(Default::default) += 1;
+}
+}
+retrievability
+}
+}

@@ -12,19 +12,33 @@ use serde::Deserialize;
 use serde::Serialize;
 use serde_json::Value;

+use crate::card::FsrsMemoryState;
 use crate::prelude::*;
 use crate::serde::default_on_invalid;

 /// Helper for serdeing the card data column.
-#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)]
 #[serde(default)]
 pub(crate) struct CardData {
 #[serde(
-skip_serializing_if = "Option::is_none",
 rename = "pos",
+skip_serializing_if = "Option::is_none",
 deserialize_with = "default_on_invalid"
 )]
 pub(crate) original_position: Option<u32>,
+#[serde(
+rename = "s",
+skip_serializing_if = "Option::is_none",
+deserialize_with = "default_on_invalid"
+)]
+pub(crate) fsrs_stability: Option<f32>,
+#[serde(
+rename = "d",
+skip_serializing_if = "Option::is_none",
+deserialize_with = "default_on_invalid"
+)]
+pub(crate) fsrs_difficulty: Option<f32>,

 /// A string representation of a JSON object storing optional data
 /// associated with the card, so v3 custom scheduling code can persist
 /// state.
@@ -36,6 +50,8 @@ impl CardData {
 pub(crate) fn from_card(card: &Card) -> Self {
 Self {
 original_position: card.original_position,
+fsrs_stability: card.fsrs_memory_state.as_ref().map(|m| m.stability),
+fsrs_difficulty: card.fsrs_memory_state.as_ref().map(|m| m.difficulty),
 custom_data: card.custom_data.clone(),
 }
 }
@@ -43,6 +59,18 @@ impl CardData {
 pub(crate) fn from_str(s: &str) -> Self {
 serde_json::from_str(s).unwrap_or_default()
 }

+pub(crate) fn fsrs_memory_state(&self) -> Option<FsrsMemoryState> {
+if let Some(stability) = self.fsrs_stability {
+if let Some(difficulty) = self.fsrs_difficulty {
+return Some(FsrsMemoryState {
+stability,
+difficulty,
+});
+}
+}
+None
+}
 }

 impl FromSql for CardData {

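With the serde attributes above, stability and difficulty ride along in the existing card.data JSON column under the short keys "s" and "d". A standalone sketch of that shape (this struct is only an illustration, not the crate's own CardData):

use serde::{Deserialize, Serialize};

// Mirrors the field renames above: "pos", "s", "d"; None fields are omitted.
#[derive(Serialize, Deserialize, Debug, Default)]
struct CardDataSketch {
    #[serde(rename = "pos", skip_serializing_if = "Option::is_none")]
    original_position: Option<u32>,
    #[serde(rename = "s", skip_serializing_if = "Option::is_none")]
    fsrs_stability: Option<f32>,
    #[serde(rename = "d", skip_serializing_if = "Option::is_none")]
    fsrs_difficulty: Option<f32>,
}

fn main() {
    let data = CardDataSketch {
        original_position: None,
        fsrs_stability: Some(3.5),
        fsrs_difficulty: Some(6.0),
    };
    // Prints {"s":3.5,"d":6.0}; a card with no FSRS state serializes to {}.
    println!("{}", serde_json::to_string(&data).unwrap());
}
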
@@ -80,6 +80,7 @@ fn row_to_card(row: &Row) -> result::Result<Card, rusqlite::Error> {
 original_deck_id: row.get(15)?,
 flags: row.get(16)?,
 original_position: data.original_position,
+fsrs_memory_state: data.fsrs_memory_state(),
 custom_data: data.custom_data,
 })
 }

@@ -138,7 +138,7 @@ impl SqliteStorage {
 self.db
 .prepare_cached(concat!(
 include_str!("get.sql"),
-" where cid in (select cid from search_cids) order by cid"
+" where cid in (select cid from search_cids) order by cid, id"
 ))?
 .query_and_then([], row_to_revlog_entry)?
 .collect()

@@ -9,6 +9,7 @@ use std::path::Path;
 use std::sync::Arc;

 use fnv::FnvHasher;
+use fsrs::FSRS;
 use regex::Regex;
 use rusqlite::functions::FunctionFlags;
 use rusqlite::params;
@@ -70,6 +71,8 @@ fn open_or_create_collection_db(path: &Path) -> Result<Connection> {
 add_without_combining_function(&db)?;
 add_fnvhash_function(&db)?;
 add_extract_custom_data_function(&db)?;
+add_extract_fsrs_variable(&db)?;
+add_extract_fsrs_retrievability(&db)?;

 db.create_collation("unicase", unicase_compare)?;

@@ -232,6 +235,76 @@ fn add_extract_custom_data_function(db: &Connection) -> rusqlite::Result<()> {
 )
 }

+/// eg. extract_fsrs_variable(card.data, 's' | 'd') -> float | null
+fn add_extract_fsrs_variable(db: &Connection) -> rusqlite::Result<()> {
+db.create_scalar_function(
+"extract_fsrs_variable",
+2,
+FunctionFlags::SQLITE_DETERMINISTIC,
+move |ctx| {
+assert_eq!(ctx.len(), 2, "called with unexpected number of arguments");
+
+let Ok(card_data) = ctx.get_raw(0).as_str() else {
+return Ok(None);
+};
+if card_data.is_empty() {
+return Ok(None);
+}
+let Ok(key) = ctx.get_raw(1).as_str() else {
+return Ok(None);
+};
+let card_data = &CardData::from_str(card_data);
+Ok(match key {
+"s" => card_data.fsrs_stability,
+"d" => card_data.fsrs_difficulty,
+_ => panic!("invalid key: {key}"),
+})
+},
+)
+}
+
+/// eg. extract_fsrs_retrievability(card.data, card.due, card.ivl,
+/// timing.days_elapsed) -> float | null
+fn add_extract_fsrs_retrievability(db: &Connection) -> rusqlite::Result<()> {
+db.create_scalar_function(
+"extract_fsrs_retrievability",
+4,
+FunctionFlags::SQLITE_DETERMINISTIC,
+move |ctx| {
+assert_eq!(ctx.len(), 4, "called with unexpected number of arguments");
+
+let Ok(card_data) = ctx.get_raw(0).as_str() else {
+return Ok(None);
+};
+if card_data.is_empty() {
+return Ok(None);
+}
+let Ok(due) = ctx.get_raw(1).as_i64() else {
+return Ok(None);
+};
+if due > 365_000 {
+// learning card
+return Ok(None);
+}
+let Ok(ivl) = ctx.get_raw(2).as_i64() else {
+return Ok(None);
+};
+let Ok(days_elapsed) = ctx.get_raw(3).as_i64() else {
+return Ok(None);
+};
+
+let review_day = due - ivl;
+let days_elapsed = days_elapsed.saturating_sub(review_day) as u32;
+let card_data = &CardData::from_str(card_data);
+Ok(card_data.fsrs_memory_state().map(|state| {
+FSRS::new(None)
+.unwrap()
+.current_retrievability(state.into(), days_elapsed)
+}))
+},
+)
+}

 /// Fetch schema version from database.
 /// Return (must_create, version)
 fn schema_version(db: &Connection) -> Result<(bool, u8)> {

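The extract_fsrs_retrievability function above recovers the days since the last review from due and ivl, then asks the fsrs crate for the current retrievability. A minimal sketch of that calculation (illustrative; it assumes the fsrs crate's MemoryState struct with public stability/difficulty fields, which the diff reaches via state.into()):

use fsrs::{FSRS, MemoryState};

// due - ivl gives the day the current interval was scheduled from, in the same
// day-count unit as timing.days_elapsed for review cards.
fn retrievability(stability: f32, difficulty: f32, due: i64, ivl: i64, days_elapsed: i64) -> f32 {
    let last_review_day = due - ivl;
    let elapsed = days_elapsed.saturating_sub(last_review_day) as u32;
    FSRS::new(None)
        .unwrap()
        .current_retrievability(MemoryState { stability, difficulty }, elapsed)
}
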
@@ -310,10 +310,7 @@ impl Collection {

 impl From<CardEntry> for Card {
 fn from(e: CardEntry) -> Self {
-let CardData {
+let data = CardData::from_str(&e.data);
-original_position,
-custom_data,
-} = CardData::from_str(&e.data);
 Card {
 id: e.id,
 note_id: e.nid,
@@ -332,8 +329,9 @@ impl From<CardEntry> for Card {
 original_due: e.odue,
 original_deck_id: e.odid,
 flags: e.flags,
-original_position,
+original_position: data.original_position,
-custom_data,
+fsrs_memory_state: data.fsrs_memory_state(),
+custom_data: data.custom_data,
 }
 }
 }

@@ -55,12 +55,43 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 value: timeSpan(stats.interval * DAY),
 });
 }
+if (stats.fsrsMemoryState) {
+let stability = timeSpan(
+stats.fsrsMemoryState.stability * 86400,
+false,
+false,
+);
+if (stats.fsrsMemoryState.stability > 31) {
+const nativeStability = stats.fsrsMemoryState.stability.toFixed(0);
+stability += ` (${nativeStability})`;
+}
+statsRows.push({
+label: tr2.cardStatsFsrsStability(),
+value: stability,
+});
+const difficulty = (
+((stats.fsrsMemoryState.difficulty - 1.0) / 9.0) *
+100.0
+).toFixed(0);
+statsRows.push({
+label: tr2.cardStatsFsrsDifficulty(),
+value: `${difficulty}%`,
+});
+if (stats.fsrsRetrievability) {
+const retrievability = (stats.fsrsRetrievability * 100).toFixed(0);
+statsRows.push({
+label: tr2.cardStatsFsrsRetrievability(),
+value: `${retrievability}%`,
+});
+}
+} else {
 if (stats.ease) {
 statsRows.push({
 label: tr2.cardStatsEase(),
 value: `${stats.ease / 10}%`,
 });
 }
+}

 statsRows.push({ label: tr2.cardStatsReviewCount(), value: stats.reviews });
 statsRows.push({ label: tr2.cardStatsLapseCount(), value: stats.lapses });

@@ -16,6 +16,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 import TitledContainer from "../components/TitledContainer.svelte";
 import type { HelpItem } from "../components/types";
 import CardStateCustomizer from "./CardStateCustomizer.svelte";
+import FsrsOptions from "./FsrsOptions.svelte";
 import type { DeckOptionsState } from "./lib";
 import SpinBoxFloatRow from "./SpinBoxFloatRow.svelte";
 import SpinBoxRow from "./SpinBoxRow.svelte";
@@ -87,6 +88,14 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 }}
 />
 <DynamicallySlottable slotHost={Item} {api}>
+{#if state.v3Scheduler}
+<Item>
+<SwitchRow bind:value={$config.fsrsEnabled} defaultValue={false}>
+<SettingTitle>FSRS</SettingTitle>
+</SwitchRow>
+</Item>
+{/if}

 <Item>
 <SpinBoxRow
 bind:value={$config.maximumReviewInterval}
@@ -103,6 +112,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 </SpinBoxRow>
 </Item>

+{#if !$config.fsrsEnabled || !state.v3Scheduler}
 <Item>
 <SpinBoxFloatRow
 bind:value={$config.initialEase}
@@ -112,7 +122,9 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 >
 <SettingTitle
 on:click={() =>
-openHelpModal(Object.keys(settings).indexOf("startingEase"))}
+openHelpModal(
+Object.keys(settings).indexOf("startingEase"),
+)}
 >
 {settings.startingEase.title}
 </SettingTitle>
@@ -162,7 +174,9 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 >
 <SettingTitle
 on:click={() =>
-openHelpModal(Object.keys(settings).indexOf("hardInterval"))}
+openHelpModal(
+Object.keys(settings).indexOf("hardInterval"),
+)}
 >
 {settings.hardInterval.title}
 </SettingTitle>
@@ -183,13 +197,8 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 </SettingTitle>
 </SpinBoxFloatRow>
 </Item>
+{:else}
-{#if state.v3Scheduler}
+<FsrsOptions {state} />
-<Item>
-<SwitchRow bind:value={$config.fsrsEnabled} defaultValue={false}>
-<SettingTitle>FSRS optimizer</SettingTitle>
-</SwitchRow>
-</Item>
 {/if}

 {#if state.v3Scheduler}

@@ -3,33 +3,29 @@ Copyright: Ankitects Pty Ltd and contributors
 License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 -->
 <script lang="ts">
-import Col from "../components/Col.svelte";
 import ConfigInput from "../components/ConfigInput.svelte";
 import RevertButton from "../components/RevertButton.svelte";
-import Row from "../components/Row.svelte";
-import SettingTitle from "../components/SettingTitle.svelte";

 export let value: string;
 export let title: string;
 </script>

-<Row>
+<div class="m-2">
-<Col>
-<div class="text">
 <ConfigInput>
-<SettingTitle on:click>{title}</SettingTitle>
 <RevertButton slot="revert" bind:value defaultValue="" />
-</ConfigInput>
+<details>
-</div>
+<summary>{title}</summary>
-</Col>
+<div class="text">
-</Row>
+<textarea

-<textarea
 class="card-state-customizer form-control"
 bind:value
 spellcheck="false"
 autocapitalize="none"
 />
+</div>
+</details>
+</ConfigInput>
+</div>

 <style lang="scss">
 .text {

@@ -17,7 +17,6 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 import ConfigSelector from "./ConfigSelector.svelte";
 import DailyLimits from "./DailyLimits.svelte";
 import DisplayOrder from "./DisplayOrder.svelte";
-import FsrsOptions from "./FsrsOptions.svelte";
 import HtmlAddon from "./HtmlAddon.svelte";
 import LapseOptions from "./LapseOptions.svelte";
 import type { DeckOptionsState } from "./lib";
@@ -26,7 +25,6 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

 export let state: DeckOptionsState;
 const addons = state.addonComponents;
-const config = state.currentConfig;

 export function auxData(): Writable<Record<string, unknown>> {
 return state.currentAuxData;
@@ -122,14 +120,6 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 </Item>
 {/if}

-{#if state.v3Scheduler && $config.fsrsEnabled}
-<Item>
-<Row class="row-columns">
-<FsrsOptions {state} />
-</Row>
-</Item>
-{/if}
-
 <Item>
 <Row class="row-columns">
 <AdvancedOptions {state} api={advancedOptions} />

@@ -4,8 +4,8 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 -->
 <script lang="ts">
 import {
-Progress_ComputeRetention,
+ComputeRetentionProgress,
-type Progress_ComputeWeights,
+type ComputeWeightsProgress,
 } from "@tslib/anki/collection_pb";
 import { ComputeOptimalRetentionRequest } from "@tslib/anki/scheduler_pb";
 import {
@@ -14,26 +14,28 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 evaluateWeights,
 setWantsAbort,
 } from "@tslib/backend";
+import * as tr from "@tslib/ftl";
 import { runWithBackendProgress } from "@tslib/progress";
-import TitledContainer from "components/TitledContainer.svelte";

-import ConfigInput from "../components/ConfigInput.svelte";
-import RevertButton from "../components/RevertButton.svelte";
 import SettingTitle from "../components/SettingTitle.svelte";
 import type { DeckOptionsState } from "./lib";
+import SpinBoxFloatRow from "./SpinBoxFloatRow.svelte";
+import Warning from "./Warning.svelte";
 import WeightsInputRow from "./WeightsInputRow.svelte";

 export let state: DeckOptionsState;

 const config = state.currentConfig;
+const defaults = state.defaults;

-let computeWeightsProgress: Progress_ComputeWeights | undefined;
+let computeWeightsProgress: ComputeWeightsProgress | undefined;
+let computeWeightsWarning = "";
 let customSearch = "";
 let computing = false;

 let computeRetentionProgress:
-| Progress_ComputeWeights
+| ComputeWeightsProgress
-| Progress_ComputeRetention
+| ComputeRetentionProgress
 | undefined;

 const computeOptimalRequest = new ComputeOptimalRetentionRequest({
@@ -41,9 +43,18 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 daysToSimulate: 365,
 maxSecondsOfStudyPerDay: 1800,
 maxInterval: 36500,
-recallSecs: 10,
+recallSecsHard: 14.0,
+recallSecsGood: 10.0,
+recallSecsEasy: 6.0,
 forgetSecs: 50,
 learnSecs: 20,
+firstRatingProbabilityAgain: 0.15,
+firstRatingProbabilityHard: 0.2,
+firstRatingProbabilityGood: 0.6,
+firstRatingProbabilityEasy: 0.05,
+reviewRatingProbabilityHard: 0.3,
+reviewRatingProbabilityGood: 0.6,
+reviewRatingProbabilityEasy: 0.1,
 });

 async function computeWeights(): Promise<void> {
@@ -62,6 +73,13 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 if (computeWeightsProgress) {
 computeWeightsProgress.current = computeWeightsProgress.total;
 }
+if (resp.fsrsItems < 1000) {
+computeWeightsWarning = tr.deckConfigLimitedHistory({
+count: resp.fsrsItems,
+});
+} else {
+computeWeightsWarning = "";
+}
 $config.fsrsWeights = resp.weights;
 },
 (progress) => {
@@ -97,7 +115,9 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 alert(
 `Log loss: ${resp.logLoss.toFixed(
 3,
-)}, RMSE: ${resp.rmse.toFixed(3)}`,
+)}, RMSE(bins): ${resp.rmseBins.toFixed(
+3,
+)}. ${tr.deckConfigSmallerIsBetter()}`,
 ),
 200,
 );
@@ -146,21 +166,21 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 computeRetentionProgress,
 );

-function renderWeightProgress(val: Progress_ComputeWeights | undefined): String {
+function renderWeightProgress(val: ComputeWeightsProgress | undefined): String {
 if (!val || !val.total) {
 return "";
 }
 let pct = ((val.current / val.total) * 100).toFixed(2);
 pct = `${pct}%`;
-if (val instanceof Progress_ComputeRetention) {
+if (val instanceof ComputeRetentionProgress) {
 return pct;
 } else {
-return `${pct} of ${val.revlogEntries} reviews`;
+return `${pct} of ${val.fsrsItems} reviews`;
 }
 }

 function renderRetentionProgress(
-val: Progress_ComputeRetention | undefined,
+val: ComputeRetentionProgress | undefined,
 ): String {
 if (!val || !val.total) {
 return "";
@@ -170,35 +190,29 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 }
 </script>

-<TitledContainer title={"FSRS"}>
+<SpinBoxFloatRow
-<WeightsInputRow
-bind:value={$config.fsrsWeights}
-defaultValue={[
-0.4, 0.6, 2.4, 5.8, 4.93, 0.94, 0.86, 0.01, 1.49, 0.14, 0.94, 2.18, 0.05,
-0.34, 1.26, 0.29, 2.61,
-]}
->
-<SettingTitle>Weights</SettingTitle>
-</WeightsInputRow>
-<div>Optimal retention</div>

-<ConfigInput>
-<input type="number" bind:value={$config.desiredRetention} />
-<RevertButton
-slot="revert"
 bind:value={$config.desiredRetention}
-defaultValue={0.9}
+defaultValue={defaults.desiredRetention}
-/>
+min={0.8}
-</ConfigInput>
+max={0.97}
+>
+<SettingTitle>
+{tr.deckConfigDesiredRetention()}
+</SettingTitle>
+</SpinBoxFloatRow>

-<div class="mb-3" />
+<div class="ms-1 me-1">
+<WeightsInputRow bind:value={$config.fsrsWeights} defaultValue={[]}>
+<SettingTitle>{tr.deckConfigWeights()}</SettingTitle>
+</WeightsInputRow>
+</div>

-<div class="bordered">
+<div class="m-2">
-<b>Optimize weights</b>
+<details>
-<br />
+<summary>{tr.deckConfigComputeOptimalWeights()}</summary>
 <input
 bind:value={customSearch}
-placeholder="Search; leave blank for all cards using this preset"
+placeholder={tr.deckConfigComputeWeightsSearch()}
 class="w-100 mb-1"
 />
 <button
@@ -206,9 +220,9 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 on:click={() => computeWeights()}
 >
 {#if computing}
-Cancel
+{tr.actionsCancel()}
 {:else}
-Compute
+{tr.deckConfigComputeButton()}
 {/if}
 </button>
 <button
@@ -216,17 +230,19 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 on:click={() => checkWeights()}
 >
 {#if computing}
-Cancel
+{tr.actionsCancel()}
 {:else}
-Check
+{tr.deckConfigAnalyzeButton()}
 {/if}
 </button>
-<div>{computeWeightsProgressString}</div>
+{#if computing}<div>{computeWeightsProgressString}</div>{/if}
-</div>
+<Warning warning={computeWeightsWarning} />
+</details>
+</div>

-<div class="bordered">
+<div class="m-2">
-<b>Calculate optimal retention</b>
+<details>
-<br />
+<summary>{tr.deckConfigComputeOptimalRetention()}</summary>

 Deck size:
 <br />
@@ -251,39 +267,100 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 <input type="number" bind:value={computeOptimalRequest.maxInterval} />
 <br />

-Seconds to recall a card:
+Seconds to forget a card (again):
-<br />
-<input type="number" bind:value={computeOptimalRequest.recallSecs} />
-<br />

-Seconds to forget a card:
 <br />
 <input type="number" bind:value={computeOptimalRequest.forgetSecs} />
 <br />

+Seconds to recall a card (hard):
+<br />
+<input type="number" bind:value={computeOptimalRequest.recallSecsHard} />
+<br />
+
+Seconds to recall a card (good):
+<br />
+<input type="number" bind:value={computeOptimalRequest.recallSecsGood} />
+<br />
+
+Seconds to recall a card (easy):
+<br />
+<input type="number" bind:value={computeOptimalRequest.recallSecsEasy} />
+<br />
+
 Seconds to learn a card:
 <br />
 <input type="number" bind:value={computeOptimalRequest.learnSecs} />
 <br />

+First rating probability (again):
+<br />
+<input
+type="number"
+bind:value={computeOptimalRequest.firstRatingProbabilityAgain}
+/>
+<br />
+
+First rating probability (hard):
+<br />
+<input
+type="number"
+bind:value={computeOptimalRequest.firstRatingProbabilityHard}
+/>
+<br />
+
+First rating probability (good):
+<br />
+<input
+type="number"
+bind:value={computeOptimalRequest.firstRatingProbabilityGood}
+/>
+<br />
+
+First rating probability (easy):
+<br />
+<input
+type="number"
+bind:value={computeOptimalRequest.firstRatingProbabilityEasy}
+/>
+<br />
+
+Review rating probability (hard):
+<br />
+<input
+type="number"
+bind:value={computeOptimalRequest.reviewRatingProbabilityHard}
+/>
+<br />
+
+Review rating probability (good):
+<br />
+<input
+type="number"
+bind:value={computeOptimalRequest.reviewRatingProbabilityGood}
+/>
+<br />
+
+Review rating probability (easy):
+<br />
+<input
+type="number"
+bind:value={computeOptimalRequest.reviewRatingProbabilityEasy}
+/>
+<br />
+
 <button
 class="btn {computing ? 'btn-warning' : 'btn-primary'}"
 on:click={() => computeRetention()}
 >
 {#if computing}
-Cancel
+{tr.actionsCancel()}
 {:else}
-Compute
+{tr.deckConfigComputeButton()}
 {/if}
 </button>
 <div>{computeRetentionProgressString}</div>
-</div>
+</details>
-</TitledContainer>
+</div>

 <style>
-.bordered {
-border: 1px solid #777;
-padding: 1em;
-margin-bottom: 2px;
-}
 </style>

@@ -28,6 +28,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 const defaults = state.defaults;

 let stepsExceedMinimumInterval: string;
+let stepsTooLargeForFsrs: string;
 $: {
 const lastRelearnStepInDays = $config.relearnSteps.length
 ? $config.relearnSteps[$config.relearnSteps.length - 1] / 60 / 24
@@ -36,6 +37,10 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 lastRelearnStepInDays > $config.minimumLapseInterval
 ? tr.deckConfigRelearningStepsAboveMinimumInterval()
 : "";
+stepsTooLargeForFsrs =
+$config.fsrsEnabled && lastRelearnStepInDays >= 1
+? tr.deckConfigStepsTooLargeForFsrs()
+: "";
 }

 const settings = {
@@ -97,6 +102,11 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 </StepsInputRow>
 </Item>

+<Item>
+<Warning warning={stepsTooLargeForFsrs} />
+</Item>
+
+{#if !$config.fsrsEnabled}
 <Item>
 <SpinBoxRow
 bind:value={$config.minimumLapseInterval}
@@ -105,12 +115,15 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 >
 <SettingTitle
 on:click={() =>
-openHelpModal(Object.keys(settings).indexOf("minimumInterval"))}
+openHelpModal(
+Object.keys(settings).indexOf("minimumInterval"),
+)}
 >
 {settings.minimumInterval.title}
 </SettingTitle>
 </SpinBoxRow>
 </Item>
+{/if}

 <Item>
 <Warning warning={stepsExceedMinimumInterval} />

@@ -29,6 +29,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
     const defaults = state.defaults;
 
     let stepsExceedGraduatingInterval: string;
+    let stepsTooLargeForFsrs: string;
     $: {
         const lastLearnStepInDays = $config.learnSteps.length
             ? $config.learnSteps[$config.learnSteps.length - 1] / 60 / 24
@@ -37,6 +38,10 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
             lastLearnStepInDays > $config.graduatingIntervalGood
                 ? tr.deckConfigLearningStepAboveGraduatingInterval()
                 : "";
+        stepsTooLargeForFsrs =
+            $config.fsrsEnabled && lastLearnStepInDays >= 1
+                ? tr.deckConfigStepsTooLargeForFsrs()
+                : "";
     }
 
     $: goodExceedsEasy =
@@ -109,6 +114,11 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
         </StepsInputRow>
     </Item>
 
+    <Item>
+        <Warning warning={stepsTooLargeForFsrs} />
+    </Item>
+
+    {#if !$config.fsrsEnabled}
     <Item>
         <SpinBoxRow
             bind:value={$config.graduatingIntervalGood}
@@ -136,7 +146,9 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
         >
             <SettingTitle
                 on:click={() =>
-                    openHelpModal(Object.keys(settings).indexOf("easyInterval"))}
+                    openHelpModal(
+                        Object.keys(settings).indexOf("easyInterval"),
+                    )}
             >
                 {settings.easyInterval.title}
             </SettingTitle>
@@ -146,6 +158,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
     <Item>
         <Warning warning={goodExceedsEasy} />
     </Item>
+    {/if}
 
     <Item>
         <EnumSelectorRow
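The stepsTooLargeForFsrs warning added in the two files above hinges on one conversion: (re)learning steps are stored in minutes, so dividing the last step by 60 and then 24 gives days, and FSRS is warned about whenever the final step reaches a day or more. A minimal standalone sketch of that check, with a helper name of our own choosing (not from the diff):

// Steps are in minutes; a final step of 1440 minutes (one day) or more
// should trigger the FSRS warning, mirroring the reactive block above.
function lastStepTooLargeForFsrs(stepsMinutes: number[], fsrsEnabled: boolean): boolean {
    const lastStepInDays = stepsMinutes.length
        ? stepsMinutes[stepsMinutes.length - 1] / 60 / 24
        : 0;
    return fsrsEnabled && lastStepInDays >= 1;
}

console.log(lastStepTooLargeForFsrs([1, 10], true)); // false
console.log(lastStepTooLargeForFsrs([10, 1440], true)); // true (one-day step)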
@@ -9,7 +9,11 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
     $: stringValue = value.map((v) => v.toFixed(4)).join(", ");
 
     function update(this: HTMLInputElement): void {
-        value = this.value.split(", ").map((v) => Number(v));
+        value = this.value
+            .replace(/ /g, "")
+            .split(",")
+            .filter((e) => e)
+            .map((v) => Number(v));
     }
 </script>
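The hunk above makes the weights field tolerant of irregular spacing and trailing commas instead of requiring an exact ", " separator. A standalone sketch of the same parsing, with a helper name of our own (not from the diff):

// Strip spaces, split on commas, drop empty entries, convert to numbers,
// mirroring the update() handler above.
function parseWeights(input: string): number[] {
    return input
        .replace(/ /g, "")
        .split(",")
        .filter((e) => e)
        .map((v) => Number(v));
}

console.log(parseWeights("0.4, 0.6,2.4, ")); // [0.4, 0.6, 2.4]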
ts/graphs/DifficultyGraph.svelte (new file, 44 lines)
@@ -0,0 +1,44 @@
+<!--
+Copyright: Ankitects Pty Ltd and contributors
+License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+-->
+<script lang="ts">
+    import type { GraphsResponse } from "@tslib/anki/stats_pb";
+    import * as tr from "@tslib/ftl";
+    import { createEventDispatcher } from "svelte";
+
+    import { gatherData, prepareData } from "./difficulty";
+    import Graph from "./Graph.svelte";
+    import type { GraphPrefs } from "./graph-helpers";
+    import type { SearchEventMap, TableDatum } from "./graph-helpers";
+    import type { HistogramData } from "./histogram-graph";
+    import HistogramGraph from "./HistogramGraph.svelte";
+    import TableData from "./TableData.svelte";
+
+    export let sourceData: GraphsResponse | null = null;
+    export let prefs: GraphPrefs;
+
+    const dispatch = createEventDispatcher<SearchEventMap>();
+
+    let histogramData = null as HistogramData | null;
+    let tableData: TableDatum[] = [];
+
+    $: if (sourceData) {
+        [histogramData, tableData] = prepareData(
+            gatherData(sourceData),
+            dispatch,
+            $prefs.browserLinksSupported,
+        );
+    }
+
+    const title = tr.statisticsCardDifficultyTitle();
+    const subtitle = tr.statisticsCardDifficultySubtitle();
+</script>
+
+{#if histogramData}
+    <Graph {title} {subtitle}>
+        <HistogramGraph data={histogramData} />
+
+        <TableData {tableData} />
+    </Graph>
+{/if}
@@ -35,8 +35,10 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
     const subtitle = tr.statisticsCardEaseSubtitle();
 </script>
 
-<Graph {title} {subtitle}>
+{#if histogramData}
+    <Graph {title} {subtitle}>
         <HistogramGraph data={histogramData} />
 
         <TableData {tableData} />
     </Graph>
+{/if}
ts/graphs/RetrievabilityGraph.svelte (new file, 44 lines)
@@ -0,0 +1,44 @@
+<!--
+Copyright: Ankitects Pty Ltd and contributors
+License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+-->
+<script lang="ts">
+    import type { GraphsResponse } from "@tslib/anki/stats_pb";
+    import * as tr from "@tslib/ftl";
+    import { createEventDispatcher } from "svelte";
+
+    import Graph from "./Graph.svelte";
+    import type { GraphPrefs } from "./graph-helpers";
+    import type { SearchEventMap, TableDatum } from "./graph-helpers";
+    import type { HistogramData } from "./histogram-graph";
+    import HistogramGraph from "./HistogramGraph.svelte";
+    import { gatherData, prepareData } from "./retrievability";
+    import TableData from "./TableData.svelte";
+
+    export let sourceData: GraphsResponse | null = null;
+    export let prefs: GraphPrefs;
+
+    const dispatch = createEventDispatcher<SearchEventMap>();
+
+    let histogramData = null as HistogramData | null;
+    let tableData: TableDatum[] = [];
+
+    $: if (sourceData) {
+        [histogramData, tableData] = prepareData(
+            gatherData(sourceData),
+            dispatch,
+            $prefs.browserLinksSupported,
+        );
+    }
+
+    const title = tr.statisticsCardRetrievabilityTitle();
+    const subtitle = tr.statisticsRetrievabilitySubtitle();
+</script>
+
+{#if histogramData}
+    <Graph {title} {subtitle}>
+        <HistogramGraph data={histogramData} />
+
+        <TableData {tableData} />
+    </Graph>
+{/if}
ts/graphs/difficulty.ts (new file, 122 lines)
@@ -0,0 +1,122 @@
+// Copyright: Ankitects Pty Ltd and contributors
+// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+
+/* eslint
+@typescript-eslint/no-explicit-any: "off",
+ */
+
+import type { GraphsResponse } from "@tslib/anki/stats_pb";
+import * as tr from "@tslib/ftl";
+import { localizedNumber } from "@tslib/i18n";
+import type { Bin, ScaleLinear } from "d3";
+import { bin, interpolateRdYlGn, scaleLinear, scaleSequential, sum } from "d3";
+
+import type { SearchDispatch, TableDatum } from "./graph-helpers";
+import { getNumericMapBinValue, numericMap } from "./graph-helpers";
+import type { HistogramData } from "./histogram-graph";
+
+export interface GraphData {
+    eases: Map<number, number>;
+}
+
+export function gatherData(data: GraphsResponse): GraphData {
+    return { eases: numericMap(data.difficulty!.eases) };
+}
+
+function makeQuery(start: number, end: number): string {
+    const fromQuery = `"prop:d>=${start / 100}"`;
+    let tillQuery = `"prop:d<${(end + 1) / 100}"`;
+    if (end === 99) {
+        tillQuery = tillQuery.replace("<", "<=");
+    }
+    return `${fromQuery} AND ${tillQuery}`;
+}
+
+function getAdjustedScaleAndTicks(
+    min: number,
+    max: number,
+    desiredBars: number,
+): [ScaleLinear<number, number, never>, number[]] {
+    const prescale = scaleLinear().domain([min, max]).nice();
+    const ticks = prescale.ticks(desiredBars);
+
+    const predomain = prescale.domain() as [number, number];
+
+    const minOffset = min - predomain[0];
+    const tickSize = ticks[1] - ticks[0];
+
+    if (minOffset === 0 || (minOffset % tickSize !== 0 && tickSize % minOffset !== 0)) {
+        return [prescale, ticks];
+    }
+
+    const add = (n: number): number => n + minOffset;
+    return [
+        scaleLinear().domain(predomain.map(add) as [number, number]),
+        ticks.map(add),
+    ];
+}
+
+export function prepareData(
+    data: GraphData,
+    dispatch: SearchDispatch,
+    browserLinksSupported: boolean,
+): [HistogramData | null, TableDatum[]] {
+    // get min/max
+    const allEases = data.eases;
+    if (!allEases.size) {
+        return [null, []];
+    }
+    const xMin = 0;
+    const xMax = 100;
+    const desiredBars = 20;
+
+    const [scale, ticks] = getAdjustedScaleAndTicks(xMin, xMax, desiredBars);
+
+    const bins = bin()
+        .value((m) => {
+            return m[0];
+        })
+        .domain(scale.domain() as [number, number])
+        .thresholds(ticks)(allEases.entries() as any);
+    const total = sum(bins as any, getNumericMapBinValue);
+
+    const colourScale = scaleSequential(interpolateRdYlGn).domain([100, 0]);
+
+    function hoverText(bin: Bin<number, number>, _percent: number): string {
+        const percent = `${bin.x0}%-${bin.x1}%`;
+        return tr.statisticsCardDifficultyTooltip({
+            cards: getNumericMapBinValue(bin as any),
+            percent,
+        });
+    }
+
+    function onClick(bin: Bin<number, number>): void {
+        const start = bin.x0!;
+        const end = bin.x1! - 1;
+        const query = makeQuery(start, end);
+        dispatch("search", { query });
+    }
+
+    const xTickFormat = (num: number): string => localizedNumber(num, 0) + "%";
+    const tableData = [
+        {
+            label: tr.statisticsAverageDifficulty(),
+            value: xTickFormat(sum(Array.from(allEases.entries()).map(([k, v]) => k * v)) / total),
+        },
+    ];
+
+    return [
+        {
+            scale,
+            bins,
+            total,
+            hoverText,
+            onClick: browserLinksSupported ? onClick : null,
+            colourScale,
+            showArea: false,
+            binValue: getNumericMapBinValue,
+            xTickFormat,
+        },
+        tableData,
+    ];
+}
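The histogram bars map directly onto Anki's property searches: clicking a bar dispatches a search built by makeQuery above, with the percentage bin converted to a 0-1 fraction. A standalone copy of that helper with example output:

function makeQuery(start: number, end: number): string {
    const fromQuery = `"prop:d>=${start / 100}"`;
    let tillQuery = `"prop:d<${(end + 1) / 100}"`;
    if (end === 99) {
        tillQuery = tillQuery.replace("<", "<=");
    }
    return `${fromQuery} AND ${tillQuery}`;
}

// Clicking the 20-24% bar:
console.log(makeQuery(20, 24)); // "prop:d>=0.2" AND "prop:d<0.25"
// The top bin is closed on the right, so 100% difficulty is included:
console.log(makeQuery(95, 99)); // "prop:d>=0.95" AND "prop:d<=1"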
@@ -41,12 +41,14 @@ import AddedGraph from "./AddedGraph.svelte";
 import ButtonsGraph from "./ButtonsGraph.svelte";
 import CalendarGraph from "./CalendarGraph.svelte";
 import CardCounts from "./CardCounts.svelte";
+import DifficultyGraph from "./DifficultyGraph.svelte";
 import EaseGraph from "./EaseGraph.svelte";
 import FutureDue from "./FutureDue.svelte";
 import { RevlogRange } from "./graph-helpers";
 import HourGraph from "./HourGraph.svelte";
 import IntervalsGraph from "./IntervalsGraph.svelte";
 import RangeBox from "./RangeBox.svelte";
+import RetrievabilityGraph from "./RetrievabilityGraph.svelte";
 import ReviewsGraph from "./ReviewsGraph.svelte";
 import TodayStats from "./TodayStats.svelte";
 
@@ -59,6 +61,8 @@ setupGraphs(
     CardCounts,
     IntervalsGraph,
     EaseGraph,
+    DifficultyGraph,
+    RetrievabilityGraph,
     HourGraph,
     ButtonsGraph,
     AddedGraph,
@@ -76,6 +80,8 @@ export const graphComponents = {
     CardCounts,
     IntervalsGraph,
     EaseGraph,
+    DifficultyGraph,
+    RetrievabilityGraph,
     HourGraph,
     ButtonsGraph,
     AddedGraph,
ts/graphs/retrievability.ts (new file, 122 lines)
@@ -0,0 +1,122 @@
+// Copyright: Ankitects Pty Ltd and contributors
+// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+
+/* eslint
+@typescript-eslint/no-explicit-any: "off",
+ */
+
+import type { GraphsResponse } from "@tslib/anki/stats_pb";
+import * as tr from "@tslib/ftl";
+import { localizedNumber } from "@tslib/i18n";
+import type { Bin, ScaleLinear } from "d3";
+import { bin, interpolateRdYlGn, scaleLinear, scaleSequential, sum } from "d3";
+
+import type { SearchDispatch, TableDatum } from "./graph-helpers";
+import { getNumericMapBinValue, numericMap } from "./graph-helpers";
+import type { HistogramData } from "./histogram-graph";
+
+export interface GraphData {
+    retrievability: Map<number, number>;
+}
+
+export function gatherData(data: GraphsResponse): GraphData {
+    return { retrievability: numericMap(data.retrievability!.retrievability) };
+}
+
+function makeQuery(start: number, end: number): string {
+    const fromQuery = `"prop:r>=${start / 100}"`;
+    let tillQuery = `"prop:r<${(end + 1) / 100}"`;
+    if (end === 99) {
+        tillQuery = tillQuery.replace("<", "<=");
+    }
+    return `${fromQuery} AND ${tillQuery}`;
+}
+
+function getAdjustedScaleAndTicks(
+    min: number,
+    max: number,
+    desiredBars: number,
+): [ScaleLinear<number, number, never>, number[]] {
+    const prescale = scaleLinear().domain([min, max]).nice();
+    const ticks = prescale.ticks(desiredBars);
+
+    const predomain = prescale.domain() as [number, number];
+
+    const minOffset = min - predomain[0];
+    const tickSize = ticks[1] - ticks[0];
+
+    if (minOffset === 0 || (minOffset % tickSize !== 0 && tickSize % minOffset !== 0)) {
+        return [prescale, ticks];
+    }
+
+    const add = (n: number): number => n + minOffset;
+    return [
+        scaleLinear().domain(predomain.map(add) as [number, number]),
+        ticks.map(add),
+    ];
+}
+
+export function prepareData(
+    data: GraphData,
+    dispatch: SearchDispatch,
+    browserLinksSupported: boolean,
+): [HistogramData | null, TableDatum[]] {
+    // get min/max
+    const allEases = data.retrievability;
+    if (!allEases.size) {
+        return [null, []];
+    }
+    const xMin = 0;
+    const xMax = 100;
+    const desiredBars = 20;
+
+    const [scale, ticks] = getAdjustedScaleAndTicks(xMin, xMax, desiredBars);
+
+    const bins = bin()
+        .value((m) => {
+            return m[0];
+        })
+        .domain(scale.domain() as [number, number])
+        .thresholds(ticks)(allEases.entries() as any);
+    const total = sum(bins as any, getNumericMapBinValue);
+
+    const colourScale = scaleSequential(interpolateRdYlGn).domain([0, 100]);
+
+    function hoverText(bin: Bin<number, number>, _percent: number): string {
+        const percent = `${bin.x0}%-${bin.x1}%`;
+        return tr.statisticsRetrievabilityTooltip({
+            cards: getNumericMapBinValue(bin as any),
+            percent,
+        });
+    }
+
+    function onClick(bin: Bin<number, number>): void {
+        const start = bin.x0!;
+        const end = bin.x1! - 1;
+        const query = makeQuery(start, end);
+        dispatch("search", { query });
+    }
+
+    const xTickFormat = (num: number): string => localizedNumber(num, 0) + "%";
+    const tableData = [
+        {
+            label: tr.statisticsAverageRetrievability(),
+            value: xTickFormat(sum(Array.from(allEases.entries()).map(([k, v]) => k * v)) / total),
+        },
+    ];
+
+    return [
+        {
+            scale,
+            bins,
+            total,
+            hoverText,
+            onClick: browserLinksSupported ? onClick : null,
+            colourScale,
+            showArea: false,
+            binValue: getNumericMapBinValue,
+            xTickFormat,
+        },
+        tableData,
+    ];
+}
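Note that retrievability.ts mirrors difficulty.ts almost line for line; the visible differences are the "prop:r" search prefix and the colour scale, whose domain is flipped ([0, 100] instead of [100, 0]) so that high retrievability, unlike high difficulty, is drawn in green. Both modules compute the table's average the same way: a card-count-weighted mean over the histogram map, formatted as a percentage. A small hedged sketch of that arithmetic, using a helper name of our own:

// Each entry is (percentage bucket -> card count); the average is
// sum(bucket * cards) / total cards, e.g. {90: 2, 100: 2} -> 95.
function weightedAveragePercent(counts: Map<number, number>): number {
    const total = Array.from(counts.values()).reduce((a, b) => a + b, 0);
    const weighted = Array.from(counts.entries()).reduce(
        (acc, [value, cards]) => acc + value * cards,
        0,
    );
    return weighted / total;
}

console.log(weightedAveragePercent(new Map([[90, 2], [100, 2]]))); // 95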
@@ -146,9 +146,12 @@ function i18nFuncForUnit(
    If precise is true, show to two decimal places, eg
    eg 70 seconds -> "1.17 minutes"
    If false, seconds and days are shown without decimals. */
-export function timeSpan(seconds: number, short = false): string {
+export function timeSpan(seconds: number, short = false, precise = true): string {
     const unit = naturalUnit(seconds);
-    const amount = unitAmount(unit, seconds);
+    let amount = unitAmount(unit, seconds);
+    if (!precise && unit < TimespanUnit.Months) {
+        amount = Math.round(amount);
+    }
     return i18nFuncForUnit(unit, short)({ amount });
 }
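A toy illustration of the new precise flag, assuming naturalUnit/unitAmount behave as the doc comment above describes: sub-month amounts are rounded to whole units when precise is false, so the "70 seconds" example collapses from 1.17 minutes to 1 minute.

// Not the real helpers -- just a check of the rounding rule added above.
const seconds = 70;
const minutes = seconds / 60; // 1.1666...
const preciseAmount = Number(minutes.toFixed(2)); // 1.17, shown when precise=true
const roundedAmount = Math.round(minutes); // 1, shown when precise=false
console.log(preciseAmount, roundedAmount);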