Mirror of https://github.com/ankitects/anki.git (synced 2025-09-25 09:16:38 -04:00)

Compare commits: packaging-... → main (279 commits)
[Commit list: 279 commits, 436590f4c2 through b4cee124c0; author, date, and message columns were not preserved in this view.]
424 changed files with 12929 additions and 11192 deletions

@@ -10,3 +10,6 @@ PYTHONDONTWRITEBYTECODE = "1" # prevent junk files on Windows
 [term]
 color = "always"
+
+[target.'cfg(all(target_env = "msvc", target_os = "windows"))']
+rustflags = ["-C", "target-feature=+crt-static"]

@@ -5,9 +5,6 @@
 db-path = "~/.cargo/advisory-db"
 db-urls = ["https://github.com/rustsec/advisory-db"]
 ignore = [
-    # pyoxidizer is stuck on an old ring version
-    "RUSTSEC-2025-0009",
-    "RUSTSEC-2025-0010",
     # burn depends on an unmaintained package 'paste'
     "RUSTSEC-2024-0436",
 ]
@@ -17,12 +14,11 @@ allow = [
     "MIT",
     "Apache-2.0",
     "Apache-2.0 WITH LLVM-exception",
+    "CDLA-Permissive-2.0",
     "ISC",
     "MPL-2.0",
-    "Unicode-DFS-2016",
     "BSD-2-Clause",
     "BSD-3-Clause",
-    "OpenSSL",
     "CC0-1.0",
     "Unlicense",
     "Zlib",

.gitignore (vendored): 2 changes
@@ -18,3 +18,5 @@ node_modules
 yarn-error.log
 ts/.svelte-kit
 .yarn
+.claude/settings.local.json
+.claude/user.md

@@ -1,4 +0,0 @@
-[settings]
-py_version=39
-known_first_party=anki,aqt,tests
-profile=black

.pylintrc: 48 deletions
@@ -1,48 +0,0 @@
-[MASTER]
-ignore-patterns=.*_pb2.*
-persistent = no
-extension-pkg-whitelist=orjson,PyQt6
-init-hook="import sys; sys.path.extend(['pylib/anki/_vendor', 'out/qt'])"
-
-[REPORTS]
-output-format=colorized
-
-[MESSAGES CONTROL]
-disable=
-  R,
-  line-too-long,
-  too-many-lines,
-  missing-function-docstring,
-  missing-module-docstring,
-  missing-class-docstring,
-  import-outside-toplevel,
-  wrong-import-position,
-  wrong-import-order,
-  fixme,
-  unused-wildcard-import,
-  attribute-defined-outside-init,
-  redefined-builtin,
-  wildcard-import,
-  broad-except,
-  bare-except,
-  unused-argument,
-  unused-variable,
-  redefined-outer-name,
-  global-statement,
-  protected-access,
-  arguments-differ,
-  arguments-renamed,
-  consider-using-f-string,
-  invalid-name,
-  broad-exception-raised
-
-[BASIC]
-good-names =
-  id,
-  tr,
-  db,
-  ok,
-  ip,
-
-[IMPORTS]
-ignored-modules = anki.*_pb2, anki.sync_pb2, win32file,pywintypes,socket,win32pipe,pyaudio,anki.scheduler_pb2,anki.notetypes_pb2

@@ -1 +1 @@
-3.9.23
+3.13.5

.ruff.toml: 93 changes
@@ -1,2 +1,91 @@
-target-version = "py39"
-extend-exclude = []
+lint.select = [
+    "E", # pycodestyle errors
+    "F", # Pyflakes errors
+    "PL", # Pylint rules
+    "I", # Isort rules
+    "ARG",
+    # "UP", # pyupgrade
+    # "B", # flake8-bugbear
+    # "SIM", # flake8-simplify
+]
+
+extend-exclude = ["*_pb2.py", "*_pb2.pyi"]
+
+lint.ignore = [
+    # Docstring rules (missing-*-docstring in pylint)
+    "D100", # Missing docstring in public module
+    "D101", # Missing docstring in public class
+    "D103", # Missing docstring in public function
+
+    # Import rules (wrong-import-* in pylint)
+    "E402", # Module level import not at top of file
+    "E501", # Line too long
+
+    # pycodestyle rules
+    "E741", # ambiguous-variable-name
+
+    # Comment rules (fixme in pylint)
+    "FIX002", # Line contains TODO
+
+    # Pyflakes rules
+    "F402", # import-shadowed-by-loop-var
+    "F403", # undefined-local-with-import-star
+    "F405", # undefined-local-with-import-star-usage
+
+    # Naming rules (invalid-name in pylint)
+    "N801", # Class name should use CapWords convention
+    "N802", # Function name should be lowercase
+    "N803", # Argument name should be lowercase
+    "N806", # Variable in function should be lowercase
+    "N811", # Constant imported as non-constant
+    "N812", # Lowercase imported as non-lowercase
+    "N813", # Camelcase imported as lowercase
+    "N814", # Camelcase imported as constant
+    "N815", # Variable in class scope should not be mixedCase
+    "N816", # Variable in global scope should not be mixedCase
+    "N817", # CamelCase imported as acronym
+    "N818", # Error suffix in exception names
+
+    # Pylint rules
+    "PLW0603", # global-statement
+    "PLW2901", # redefined-loop-name
+    "PLC0415", # import-outside-top-level
+    "PLR2004", # magic-value-comparison
+
+    # Exception handling (broad-except, bare-except in pylint)
+    "BLE001", # Do not catch blind exception
+
+    # Argument rules (unused-argument in pylint)
+    "ARG001", # Unused function argument
+    "ARG002", # Unused method argument
+    "ARG005", # Unused lambda argument
+
+    # Access rules (protected-access in pylint)
+    "SLF001", # Private member accessed
+
+    # String formatting (consider-using-f-string in pylint)
+    "UP032", # Use f-string instead of format call
+
+    # Exception rules (broad-exception-raised in pylint)
+    "TRY301", # Abstract raise to an inner function
+
+    # Builtin shadowing (redefined-builtin in pylint)
+    "A001", # Variable shadows a Python builtin
+    "A002", # Argument shadows a Python builtin
+    "A003", # Class attribute shadows a Python builtin
+]
+
+[lint.per-file-ignores]
+"**/anki/*_pb2.py" = ["ALL"]
+
+[lint.pep8-naming]
+ignore-names = ["id", "tr", "db", "ok", "ip"]
+
+[lint.pylint]
+max-args = 12
+max-returns = 10
+max-branches = 35
+max-statements = 125
+
+[lint.isort]
+known-first-party = ["anki", "aqt", "tests"]

.version: 2 changes
@@ -1 +1 @@
-25.06
+25.09.2

@@ -2,7 +2,7 @@
   "recommendations": [
     "dprint.dprint",
     "ms-python.python",
-    "ms-python.black-formatter",
+    "charliermarsh.ruff",
     "rust-lang.rust-analyzer",
     "svelte.svelte-vscode",
     "zxh404.vscode-proto3",

@@ -18,7 +18,7 @@
         "out/qt",
         "qt"
     ],
-    "python.formatting.provider": "black",
+    "python.formatting.provider": "charliermarsh.ruff",
     "python.linting.mypyEnabled": false,
     "python.analysis.diagnosticSeverityOverrides": {
         "reportMissingModuleSource": "none"

@@ -1 +1,2 @@
 nodeLinker: node-modules
+enableScripts: false

CLAUDE.md: new file (86 lines)

# Claude Code Configuration

## Project Overview

Anki is a spaced repetition flashcard program with a multi-layered architecture. Main components:

- Web frontend: Svelte/TypeScript in ts/
- PyQt GUI, which embeds the web components, in aqt/
- Python library which wraps our Rust layer (pylib/, with Rust module in pylib/rsbridge)
- Core Rust layer in rslib/
- Protobuf definitions in proto/ that are used by the different layers to talk to each other.

## Building/checking

./check (check.bat) will format the code and run the main build & checks. Please do this as a final step before marking a task as completed.

## Quick iteration

During development, you can build/check subsections of our code:

- Rust: 'cargo check'
- Python: './tools/dmypy', and if wheel-related, './ninja wheels'
- TypeScript/Svelte: './ninja check:svelte'

Be mindful that some changes (such as modifications to .proto files) may need a full build with './check' first.

## Build tooling

'./check' and './ninja' invoke our build system, which is implemented in build/. It takes care of downloading required deps and invoking our build steps.

## Translations

ftl/ contains our Fluent translation files. We have scripts in rslib/i18n to auto-generate an API for Rust, TypeScript and Python so that our code can access the translations in a type-safe manner. Changes should be made to ftl/core or ftl/qt. Except for features specific to our Qt interface, prefer the core module. When adding new strings, confirm the appropriate ftl file first, and try to match the existing style.

## Protobuf and IPC

Our build scripts use the .proto files to define our Rust library's non-Rust API. pylib/rsbridge exposes that API, and _backend.py exposes snake_case methods for each protobuf RPC that call into the API. Similar tooling creates a @generated/backend TypeScript module for communicating with the Rust backend (which happens over POST requests).

## Fixing errors

When dealing with build errors or failing tests, invoke 'check' or one of the quick iteration commands regularly. This helps verify your changes are correct. To locate other instances of a problem, run the check again - don't attempt to grep the codebase.

## Ignores

The files in out/ are auto-generated. Mostly you should ignore that folder, though you may sometimes find it useful to view out/{pylib/anki,qt/_aqt,ts/lib/generated} when dealing with cross-language communication or our other generated source code.

## Launcher/installer

The code for our launcher is in qt/launcher, with separate code for each platform.

## Rust dependencies

Prefer adding to the root workspace, and using dep.workspace = true in the individual Rust project.

## Rust utilities

rslib/{process,io} contain some helpers for file and process operations, which provide better error messages/context and some ergonomics. Use them when possible.

## Rust error handling

In rslib, use error/mod.rs's AnkiError/Result and snafu. In our other Rust modules, prefer anyhow + additional context where appropriate. Unwrapping in build scripts/tests is fine.

## Individual preferences

See @.claude/user.md
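
The error-handling guidance above is brief; as an illustration only, here is a minimal sketch (not part of this diff) of the "anyhow + additional context" style it recommends for Rust code outside rslib. The function and the caller are hypothetical:

    use anyhow::{Context, Result};

    fn read_version_file(path: &str) -> Result<String> {
        // Attach the failing path to the error, per the guidance for
        // non-rslib crates.
        let text = std::fs::read_to_string(path)
            .with_context(|| format!("failed to read {path}"))?;
        Ok(text.trim().to_string())
    }

    fn main() -> Result<()> {
        // Hypothetical usage; .version is the file this compare bumps to 25.09.2.
        let version = read_version_file(".version")?;
        println!("version: {version}");
        Ok(())
    }
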
CONTRIBUTORS: 12 changes
@@ -49,6 +49,7 @@ Sander Santema <github.com/sandersantema/>
 Thomas Brownback <https://github.com/brownbat/>
 Andrew Gaul <andrew@gaul.org>
 kenden
+Emil Hamrin <github.com/e-hamrin>
 Nickolay Yudin <kelciour@gmail.com>
 neitrinoweb <github.com/neitrinoweb/>
 Andreas Reis <github.com/nwwt>
@@ -63,6 +64,7 @@ Jakub Kaczmarzyk <jakub.kaczmarzyk@gmail.com>
 Akshara Balachandra <akshara.bala.18@gmail.com>
 lukkea <github.com/lukkea/>
 David Allison <davidallisongithub@gmail.com>
+David Allison <62114487+david-allison@users.noreply.github.com>
 Tsung-Han Yu <johan456789@gmail.com>
 Piotr Kubowicz <piotr.kubowicz@gmail.com>
 RumovZ <gp5glkw78@relay.firefox.com>
@@ -232,6 +234,16 @@ Spiritual Father <https://github.com/spiritualfather>
 Emmanuel Ferdman <https://github.com/emmanuel-ferdman>
 Sunong2008 <https://github.com/Sunrongguo2008>
 Marvin Kopf <marvinkopf@outlook.com>
+Kevin Nakamura <grinkers@grinkers.net>
+Bradley Szoke <bradleyszoke@gmail.com>
+jcznk <https://github.com/jcznk>
+Thomas Rixen <thomas.rixen@student.uclouvain.be>
+Siyuan Mattuwu Yan <syan4@ualberta.ca>
+Lee Doughty <32392044+leedoughty@users.noreply.github.com>
+memchr <memchr@proton.me>
+Max Romanowski <maxr777@proton.me>
+Aldlss <ayaldlss@gmail.com>
 
 ********************
 
 The text of the 3 clause BSD license follows:

Cargo.lock (generated): 2108 changes. File diff suppressed because it is too large.
Cargo.toml: 133 changes
@@ -33,9 +33,8 @@ git = "https://github.com/ankitects/linkcheck.git"
 rev = "184b2ca50ed39ca43da13f0b830a463861adb9ca"
 
 [workspace.dependencies.fsrs]
-version = "4.0.0"
+version = "5.1.0"
 # git = "https://github.com/open-spaced-repetition/fsrs-rs.git"
-# rev = "a7f7efc10f0a26b14ee348cc7402155685f2a24f"
 # path = "../open-spaced-repetition/fsrs-rs"
 
 [workspace.dependencies]
@@ -52,103 +51,101 @@ ninja_gen = { "path" = "build/ninja_gen" }
 unicase = "=2.6.0" # any changes could invalidate sqlite indexes
 
 # normal
-ammonia = "4.0.0"
+ammonia = "4.1.2"
-anyhow = "1.0.90"
+anyhow = "1.0.98"
-apple-bundles = "0.17.0"
-async-compression = { version = "0.4.17", features = ["zstd", "tokio"] }
+async-compression = { version = "0.4.24", features = ["zstd", "tokio"] }
 async-stream = "0.3.6"
-async-trait = "0.1.83"
+async-trait = "0.1.88"
-axum = { version = "0.7", features = ["multipart", "macros"] }
+axum = { version = "0.8.4", features = ["multipart", "macros"] }
-axum-client-ip = "0.6"
+axum-client-ip = "1.1.3"
-axum-extra = { version = "0.9.4", features = ["typed-header"] }
+axum-extra = { version = "0.10.1", features = ["typed-header"] }
+bitflags = "2.9.1"
-blake3 = "1.5.4"
+blake3 = "1.8.2"
-bytes = "1.7.2"
+bytes = "1.10.1"
-camino = "1.1.9"
+camino = "1.1.10"
-chrono = { version = "0.4.38", default-features = false, features = ["std", "clock"] }
+chrono = { version = "0.4.41", default-features = false, features = ["std", "clock"] }
-clap = { version = "4.5.20", features = ["derive"] }
+clap = { version = "4.5.40", features = ["derive"] }
-coarsetime = "0.1.34"
+coarsetime = "0.1.36"
-convert_case = "0.6.0"
+convert_case = "0.8.0"
-criterion = { version = "0.5.1" }
+criterion = { version = "0.6.0" }
-csv = "1.3.0"
+csv = "1.3.1"
-data-encoding = "2.6.0"
+data-encoding = "2.9.0"
 difflib = "0.4.0"
-dirs = "5.0.1"
+dirs = "6.0.0"
 dunce = "1.0.5"
-embed-resource = "2.4"
+embed-resource = "3.0.4"
 envy = "0.4.2"
-flate2 = "1.0.34"
+flate2 = "1.1.2"
-fluent = "0.16.1"
+fluent = "0.17.0"
-fluent-bundle = "0.15.3"
+fluent-bundle = "0.16.0"
-fluent-syntax = "0.11.1"
+fluent-syntax = "0.12.0"
 fnv = "1.0.7"
 futures = "0.3.31"
-glob = "0.3.1"
-globset = "0.4.15"
+globset = "0.4.16"
 hex = "0.4.3"
 htmlescape = "0.3.1"
 hyper = "1"
 id_tree = "1.8.0"
 inflections = "1.1.1"
-intl-memoizer = "0.5.2"
+intl-memoizer = "0.5.3"
-itertools = "0.13.0"
+itertools = "0.14.0"
 junction = "1.2.0"
-lazy_static = "1.5.0"
 libc = "0.2"
 libc-stdhandle = "0.1"
 maplit = "1.0.2"
-nom = "7.1.3"
+nom = "8.0.0"
 num-format = "0.4.4"
-num_cpus = "1.16.0"
+num_cpus = "1.17.0"
 num_enum = "0.7.3"
-once_cell = "1.20.2"
+once_cell = "1.21.3"
 pbkdf2 = { version = "0.12", features = ["simple"] }
-phf = { version = "0.11.2", features = ["macros"] }
+phf = { version = "0.11.3", features = ["macros"] }
-pin-project = "1.1.6"
+pin-project = "1.1.10"
-plist = "1.7.0"
-prettyplease = "0.2.24"
+prettyplease = "0.2.34"
 prost = "0.13"
 prost-build = "0.13"
-prost-reflect = "0.14"
+prost-reflect = "0.14.7"
 prost-types = "0.13"
-pulldown-cmark = "0.9.6"
+pulldown-cmark = "0.13.0"
-pyo3 = { version = "0.24", features = ["extension-module", "abi3", "abi3-py39"] }
+pyo3 = { version = "0.25.1", features = ["extension-module", "abi3", "abi3-py39"] }
-rand = "0.8.5"
+rand = "0.9.1"
+rayon = "1.10.0"
-regex = "1.11.0"
+regex = "1.11.1"
-reqwest = { version = "0.12.8", default-features = false, features = ["json", "socks", "stream", "multipart"] }
+reqwest = { version = "0.12.20", default-features = false, features = ["json", "socks", "stream", "multipart"] }
-rusqlite = { version = "0.30.0", features = ["trace", "functions", "collation", "bundled"] }
+rusqlite = { version = "0.36.0", features = ["trace", "functions", "collation", "bundled"] }
 rustls-pemfile = "2.2.0"
 scopeguard = "1.2.0"
-serde = { version = "1.0.210", features = ["derive"] }
+serde = { version = "1.0.219", features = ["derive"] }
-serde-aux = "4.5.0"
+serde-aux = "4.7.0"
-serde_json = "1.0.132"
+serde_json = "1.0.140"
-serde_repr = "0.1.19"
+serde_repr = "0.1.20"
-serde_tuple = "0.5.0"
+serde_tuple = "1.1.0"
 sha1 = "0.10.6"
-sha2 = { version = "0.10.8" }
+sha2 = { version = "0.10.9" }
-simple-file-manifest = "0.11.0"
 snafu = { version = "0.8.6", features = ["rust_1_61"] }
-strum = { version = "0.26.3", features = ["derive"] }
+strum = { version = "0.27.1", features = ["derive"] }
-syn = { version = "2.0.82", features = ["parsing", "printing"] }
+syn = { version = "2.0.103", features = ["parsing", "printing"] }
-tar = "0.4.42"
+tar = "0.4.44"
-tempfile = "3.13.0"
+tempfile = "3.20.0"
 termcolor = "1.4.1"
-tokio = { version = "1.40", features = ["fs", "rt-multi-thread", "macros", "signal"] }
+tokio = { version = "1.45", features = ["fs", "rt-multi-thread", "macros", "signal"] }
-tokio-util = { version = "0.7.12", features = ["io"] }
+tokio-util = { version = "0.7.15", features = ["io"] }
-tower-http = { version = "0.5", features = ["trace"] }
+tower-http = { version = "0.6.6", features = ["trace"] }
-tracing = { version = "0.1.40", features = ["max_level_trace", "release_max_level_debug"] }
+tracing = { version = "0.1.41", features = ["max_level_trace", "release_max_level_debug"] }
 tracing-appender = "0.2.3"
-tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] }
+tracing-subscriber = { version = "0.3.20", features = ["fmt", "env-filter"] }
-tugger-windows-codesign = "0.10.0"
-unic-langid = { version = "0.9.5", features = ["macros"] }
+unic-langid = { version = "0.9.6", features = ["macros"] }
 unic-ucd-category = "0.9.0"
 unicode-normalization = "0.1.24"
 walkdir = "2.5.0"
-which = "5.0.0"
+which = "8.0.0"
+widestring = "1.1.0"
-winapi = { version = "0.3", features = ["wincon"] }
+winapi = { version = "0.3", features = ["wincon", "winreg"] }
+windows = { version = "0.61.3", features = ["Media_SpeechSynthesis", "Media_Core", "Foundation_Collections", "Storage_Streams", "Win32_System_Console", "Win32_System_Registry", "Win32_System_SystemInformation", "Win32_Foundation", "Win32_UI_Shell", "Wdk_System_SystemServices"] }
-wiremock = "0.6.2"
+wiremock = "0.6.3"
 xz2 = "0.1.7"
-zip = { version = "0.6.6", default-features = false, features = ["deflate", "time"] }
+zip = { version = "4.1.0", default-features = false, features = ["deflate", "time"] }
-zstd = { version = "0.13.2", features = ["zstdmt"] }
+zstd = { version = "0.13.3", features = ["zstdmt"] }
 
 # Apply mild optimizations to our dependencies in dev mode, which among other things
 # improves sha2 performance by about 21x. Opt 1 chosen due to

LICENSE: 2 changes
@@ -6,8 +6,6 @@ The following included source code items use a license other than AGPL3:
 
 In the pylib folder:
 
-* The SuperMemo importer: GPL3 and 0BSD.
-* The Pauker importer: BSD-3.
 * statsbg.py: CC BY 4.0.
 
 In the qt folder:

@@ -1,4 +1,4 @@
-# Anki
+# Anki®
 
 [](https://buildkite.com/ankitects/anki-ci)
 

@@ -27,7 +27,6 @@ pub fn build_and_check_aqt(build: &mut Build) -> Result<()> {
     build_forms(build)?;
     build_generated_sources(build)?;
     build_data_folder(build)?;
-    build_macos_helper(build)?;
     build_wheel(build)?;
     check_python(build)?;
     Ok(())
@@ -39,7 +38,6 @@ fn build_forms(build: &mut Build) -> Result<()> {
     let mut py_files = vec![];
     for path in ui_files.resolve() {
         let outpath = outdir.join(path.file_name().unwrap()).into_string();
-        py_files.push(outpath.replace(".ui", "_qt5.py"));
         py_files.push(outpath.replace(".ui", "_qt6.py"));
     }
     build.add_action(
@@ -337,27 +335,6 @@ impl BuildAction for BuildThemedIcon<'_> {
     }
 }
 
-fn build_macos_helper(build: &mut Build) -> Result<()> {
-    if cfg!(target_os = "macos") {
-        build.add_action(
-            "qt:aqt:data:lib:libankihelper",
-            RunCommand {
-                command: ":pyenv:bin",
-                args: "$script $out $in",
-                inputs: hashmap! {
-                    "script" => inputs!["qt/mac/helper_build.py"],
-                    "in" => inputs![glob!["qt/mac/*.swift"]],
-                    "" => inputs!["out/env"],
-                },
-                outputs: hashmap! {
-                    "out" => vec!["qt/_aqt/data/lib/libankihelper.dylib"],
-                },
-            },
-        )?;
-    }
-    Ok(())
-}
-
 fn build_wheel(build: &mut Build) -> Result<()> {
     build.add_action(
         "wheels:aqt",
@@ -365,7 +342,12 @@ fn build_wheel(build: &mut Build) -> Result<()> {
             name: "aqt",
             version: anki_version(),
             platform: None,
-            deps: inputs![":qt:aqt", glob!("qt/aqt/**"), "qt/pyproject.toml"],
+            deps: inputs![
+                ":qt:aqt",
+                glob!("qt/aqt/**"),
+                "qt/pyproject.toml",
+                "qt/hatch_build.py"
+            ],
         },
     )
 }

@@ -68,7 +68,8 @@ pub fn build_pylib(build: &mut Build) -> Result<()> {
             deps: inputs![
                 ":pylib:anki",
                 glob!("pylib/anki/**"),
-                "pylib/pyproject.toml"
+                "pylib/pyproject.toml",
+                "pylib/hatch_build.py"
             ],
         },
     )?;

@@ -1,34 +1,61 @@
 // Copyright: Ankitects Pty Ltd and contributors
 // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-use std::env;
-
 use anyhow::Result;
 use ninja_gen::action::BuildAction;
 use ninja_gen::archives::Platform;
 use ninja_gen::build::FilesHandle;
-use ninja_gen::command::RunCommand;
 use ninja_gen::copy::CopyFiles;
 use ninja_gen::glob;
-use ninja_gen::hashmap;
 use ninja_gen::input::BuildInput;
 use ninja_gen::inputs;
 use ninja_gen::python::python_format;
 use ninja_gen::python::PythonEnvironment;
-use ninja_gen::python::PythonLint;
 use ninja_gen::python::PythonTypecheck;
-use ninja_gen::rsync::RsyncFiles;
+use ninja_gen::python::RuffCheck;
 use ninja_gen::Build;
 
+/// Normalize version string by removing leading zeros from numeric parts
+/// while preserving pre-release markers (b1, rc2, a3, etc.)
+fn normalize_version(version: &str) -> String {
+    version
+        .split('.')
+        .map(|part| {
+            // Check if the part contains only digits
+            if part.chars().all(|c| c.is_ascii_digit()) {
+                // Numeric part: remove leading zeros
+                part.parse::<u32>().unwrap_or(0).to_string()
+            } else {
+                // Mixed part (contains both numbers and pre-release markers)
+                // Split on first non-digit character and normalize the numeric prefix
+                let chars = part.chars();
+                let mut numeric_prefix = String::new();
+                let mut rest = String::new();
+                let mut found_non_digit = false;
+
+                for ch in chars {
+                    if ch.is_ascii_digit() && !found_non_digit {
+                        numeric_prefix.push(ch);
+                    } else {
+                        found_non_digit = true;
+                        rest.push(ch);
+                    }
+                }
+
+                if numeric_prefix.is_empty() {
+                    part.to_string()
+                } else {
+                    let normalized_prefix = numeric_prefix.parse::<u32>().unwrap_or(0).to_string();
+                    format!("{normalized_prefix}{rest}")
+                }
+            }
+        })
+        .collect::<Vec<_>>()
+        .join(".")
+}
+
 pub fn setup_venv(build: &mut Build) -> Result<()> {
-    let extra_binary_exports = &[
-        "mypy",
-        "black",
-        "isort",
-        "pylint",
-        "pytest",
-        "protoc-gen-mypy",
-    ];
+    let extra_binary_exports = &["mypy", "ruff", "pytest", "protoc-gen-mypy"];
     build.add_action(
         "pyenv",
         PythonEnvironment {
@@ -96,7 +123,14 @@ impl BuildAction for BuildWheel {
     }
 
     fn files(&mut self, build: &mut impl FilesHandle) {
-        build.add_inputs("uv", inputs![":uv_binary"]);
+        if std::env::var("OFFLINE_BUILD").ok().as_deref() == Some("1") {
+            let uv_path =
+                std::env::var("UV_BINARY").expect("UV_BINARY must be set in OFFLINE_BUILD mode");
+            build.add_inputs("uv", inputs![uv_path]);
+        } else {
+            build.add_inputs("uv", inputs![":uv_binary"]);
+        }
+
         build.add_inputs("", &self.deps);
 
         // Set the project directory based on which package we're building
@@ -131,14 +165,7 @@ impl BuildAction for BuildWheel {
 
         let name = self.name;
 
-        // Normalize version like hatchling does: remove leading zeros from version
-        // parts
-        let normalized_version = self
-            .version
-            .split('.')
-            .map(|part| part.parse::<u32>().unwrap_or(0).to_string())
-            .collect::<Vec<_>>()
-            .join(".");
+        let normalized_version = normalize_version(&self.version);
 
         let wheel_path = format!("wheels/{name}-{normalized_version}-{tag}.whl");
         build.add_outputs("out", vec![wheel_path]);
@@ -168,60 +195,26 @@ pub fn check_python(build: &mut Build) -> Result<()> {
         },
     )?;
 
-    add_pylint(build)?;
-
-    Ok(())
-}
-
-fn add_pylint(build: &mut Build) -> Result<()> {
-    // pylint does not support PEP420 implicit namespaces split across import paths,
-    // so we need to merge our pylib sources and generated files before invoking it,
-    // and add a top-level __init__.py
-    build.add_action(
-        "check:pylint:copy_pylib",
-        RsyncFiles {
-            inputs: inputs![":pylib:anki"],
-            target_folder: "pylint/anki",
-            strip_prefix: "$builddir/pylib/anki",
-            // avoid copying our large rsbridge binary
-            extra_args: "--links",
-        },
-    )?;
-    build.add_action(
-        "check:pylint:copy_pylib",
-        RsyncFiles {
-            inputs: inputs![glob!["pylib/anki/**"]],
-            target_folder: "pylint/anki",
-            strip_prefix: "pylib/anki",
-            extra_args: "",
-        },
-    )?;
-    build.add_action(
-        "check:pylint:copy_pylib",
-        RunCommand {
-            command: ":pyenv:bin",
-            args: "$script $out",
-            inputs: hashmap! { "script" => inputs!["python/mkempty.py"] },
-            outputs: hashmap! { "out" => vec!["pylint/anki/__init__.py"] },
-        },
-    )?;
-    build.add_action(
-        "check:pylint",
-        PythonLint {
-            folders: &[
-                "$builddir/pylint/anki",
-                "qt/aqt",
-                "ftl",
-                "pylib/tools",
-                "tools",
-                "python",
-            ],
-            pylint_ini: inputs![".pylintrc"],
-            deps: inputs![
-                ":check:pylint:copy_pylib",
-                ":qt:aqt",
-                glob!("{pylib/tools,ftl,qt,python,tools}/**/*.py")
-            ],
-        },
-    )?;
+    let ruff_folders = &["qt/aqt", "ftl", "pylib/tools", "tools", "python"];
+    let ruff_deps = inputs![
+        glob!["{pylib,ftl,qt,python,tools}/**/*.py"],
+        ":pylib:anki",
+        ":qt:aqt"
+    ];
+
+    build.add_action(
+        "check:ruff",
+        RuffCheck {
+            folders: ruff_folders,
+            deps: ruff_deps.clone(),
+            check_only: true,
+        },
+    )?;
+    build.add_action(
+        "fix:ruff",
+        RuffCheck {
+            folders: ruff_folders,
+            deps: ruff_deps,
+            check_only: false,
+        },
+    )?;
@@ -234,15 +227,19 @@ struct Sphinx {
 
 impl BuildAction for Sphinx {
     fn command(&self) -> &str {
-        if env::var("OFFLINE_BUILD").is_err() {
-            "$uv sync --extra sphinx && $python python/sphinx/build.py"
-        } else {
+        if std::env::var("OFFLINE_BUILD").ok().as_deref() == Some("1") {
             "$python python/sphinx/build.py"
+        } else {
+            "$uv sync --extra sphinx && $python python/sphinx/build.py"
         }
     }
 
     fn files(&mut self, build: &mut impl FilesHandle) {
-        if env::var("OFFLINE_BUILD").is_err() {
+        if std::env::var("OFFLINE_BUILD").ok().as_deref() == Some("1") {
+            let uv_path =
+                std::env::var("UV_BINARY").expect("UV_BINARY must be set in OFFLINE_BUILD mode");
+            build.add_inputs("uv", inputs![uv_path]);
+        } else {
             build.add_inputs("uv", inputs![":uv_binary"]);
             // Set environment variable to use the existing pyenv
             build.add_variable("pyenv_path", "$builddir/pyenv");
@@ -279,3 +276,25 @@ pub(crate) fn setup_sphinx(build: &mut Build) -> Result<()> {
     )?;
     Ok(())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_normalize_version_basic() {
+        assert_eq!(normalize_version("1.2.3"), "1.2.3");
+        assert_eq!(normalize_version("01.02.03"), "1.2.3");
+        assert_eq!(normalize_version("1.0.0"), "1.0.0");
+    }
+
+    #[test]
+    fn test_normalize_version_with_prerelease() {
+        assert_eq!(normalize_version("1.2.3b1"), "1.2.3b1");
+        assert_eq!(normalize_version("01.02.03b1"), "1.2.3b1");
+        assert_eq!(normalize_version("1.0.0rc2"), "1.0.0rc2");
+        assert_eq!(normalize_version("2.1.0a3"), "2.1.0a3");
+        assert_eq!(normalize_version("1.2.3beta1"), "1.2.3beta1");
+        assert_eq!(normalize_version("1.2.3alpha1"), "1.2.3alpha1");
+    }
+}

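The OFFLINE_BUILD branches above repeat one pattern: when OFFLINE_BUILD=1, the uv binary is taken from the UV_BINARY environment variable instead of the ":uv_binary" build target. A standalone sketch of that selection logic, for illustration only (the function name is hypothetical; only std is used):

    fn uv_input() -> String {
        // When OFFLINE_BUILD=1, the build must not download anything; a
        // pre-provisioned uv binary is supplied via UV_BINARY.
        if std::env::var("OFFLINE_BUILD").ok().as_deref() == Some("1") {
            std::env::var("UV_BINARY").expect("UV_BINARY must be set in OFFLINE_BUILD mode")
        } else {
            ":uv_binary".to_string()
        }
    }

    fn main() {
        println!("uv comes from: {}", uv_input());
    }
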
@@ -169,7 +169,7 @@ fn build_rsbridge(build: &mut Build) -> Result<()> {
 
 pub fn check_rust(build: &mut Build) -> Result<()> {
     let inputs = inputs![
-        glob!("{rslib/**,pylib/rsbridge/**,ftl/**,build/**,tools/workspace-hack/**}"),
+        glob!("{rslib/**,pylib/rsbridge/**,ftl/**,build/**,qt/launcher/**,tools/minilints/**}"),
         "Cargo.lock",
         "Cargo.toml",
         "rust-toolchain.toml",
@@ -247,7 +247,7 @@ pub fn check_minilints(build: &mut Build) -> Result<()> {
     let files = inputs![
         glob![
             "**/*.{py,rs,ts,svelte,mjs,md}",
-            "{node_modules,qt/bundle/PyOxidizer,ts/.svelte-kit}/**"
+            "{node_modules,ts/.svelte-kit}/**"
         ],
         "Cargo.lock"
     ];

@@ -18,6 +18,7 @@ maplit.workspace = true
 num_cpus.workspace = true
 regex.workspace = true
 serde_json.workspace = true
+sha2.workspace = true
 walkdir.workspace = true
 which.workspace = true
@@ -30,3 +31,11 @@ reqwest = { workspace = true, features = ["blocking", "json", "rustls-tls"] }
 [[bin]]
 name = "update_uv"
 path = "src/bin/update_uv.rs"
+
+[[bin]]
+name = "update_protoc"
+path = "src/bin/update_protoc.rs"
+
+[[bin]]
+name = "update_node"
+path = "src/bin/update_node.rs"

@@ -49,6 +49,46 @@ pub trait BuildAction {
     }
 
     fn name(&self) -> &'static str {
-        std::any::type_name::<Self>().split("::").last().unwrap()
+        std::any::type_name::<Self>()
+            .split("::")
+            .last()
+            .unwrap()
+            .split('<')
+            .next()
+            .unwrap()
     }
 }
+
+#[cfg(test)]
+trait TestBuildAction {}
+
+#[cfg(test)]
+impl<T: TestBuildAction + ?Sized> BuildAction for T {
+    fn command(&self) -> &str {
+        "test"
+    }
+    fn files(&mut self, _build: &mut impl FilesHandle) {}
+}
+
+#[allow(dead_code, unused_variables)]
+#[test]
+fn should_strip_regions_in_type_name() {
+    struct Bare;
+    impl TestBuildAction for Bare {}
+    assert_eq!(Bare {}.name(), "Bare");
+
+    struct WithLifeTime<'a>(&'a str);
+    impl TestBuildAction for WithLifeTime<'_> {}
+    assert_eq!(WithLifeTime("test").name(), "WithLifeTime");
+
+    struct WithMultiLifeTime<'a, 'b>(&'a str, &'b str);
+    impl TestBuildAction for WithMultiLifeTime<'_, '_> {}
+    assert_eq!(
+        WithMultiLifeTime("test", "test").name(),
+        "WithMultiLifeTime"
+    );
+
+    struct WithGeneric<T>(T);
+    impl<T> TestBuildAction for WithGeneric<T> {}
+    assert_eq!(WithGeneric(3).name(), "WithGeneric");
+}

@@ -67,7 +67,7 @@ impl Platform {
 }
 
 /// Append .exe to path if on Windows.
-pub fn with_exe(path: &str) -> Cow<str> {
+pub fn with_exe(path: &str) -> Cow<'_, str> {
     if cfg!(windows) {
         format!("{path}.exe").into()
     } else {

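The with_exe change only makes the elided lifetime explicit (Cow<'_, str>), but the Cow return is the interesting part: on non-Windows the input can be handed back as a borrow with no allocation, while Windows gets an owned String with ".exe" appended. A self-contained sketch of the same pattern; the else branch shown here is an assumption, since the diff cuts off before it:

    use std::borrow::Cow;

    fn with_exe(path: &str) -> Cow<'_, str> {
        if cfg!(windows) {
            // Owned variant: a new String with the suffix appended.
            format!("{path}.exe").into()
        } else {
            // Borrowed variant: zero-copy passthrough (assumed body).
            path.into()
        }
    }

    fn main() {
        println!("{}", with_exe("protoc"));
    }
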
268
build/ninja_gen/src/bin/update_node.rs
Normal file
268
build/ninja_gen/src/bin/update_node.rs
Normal file
|
@ -0,0 +1,268 @@
|
||||||
|
// Copyright: Ankitects Pty Ltd and contributors
|
||||||
|
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||||
|
|
||||||
|
use std::error::Error;
|
||||||
|
use std::fs;
|
||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use regex::Regex;
|
||||||
|
use reqwest::blocking::Client;
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct NodeRelease {
|
||||||
|
version: String,
|
||||||
|
files: Vec<NodeFile>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct NodeFile {
|
||||||
|
filename: String,
|
||||||
|
url: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() -> Result<(), Box<dyn Error>> {
|
||||||
|
let release_info = fetch_node_release_info()?;
|
||||||
|
let new_text = generate_node_archive_function(&release_info)?;
|
||||||
|
update_node_text(&new_text)?;
|
||||||
|
println!("Node.js archive function updated successfully!");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fetch_node_release_info() -> Result<NodeRelease, Box<dyn Error>> {
|
||||||
|
let client = Client::new();
|
||||||
|
|
||||||
|
// Get the Node.js release info
|
||||||
|
let response = client
|
||||||
|
.get("https://nodejs.org/dist/index.json")
|
||||||
|
.header("User-Agent", "anki-build-updater")
|
||||||
|
.send()?;
|
||||||
|
|
||||||
|
let releases: Vec<Value> = response.json()?;
|
||||||
|
|
||||||
|
// Find the latest LTS release
|
||||||
|
let latest = releases
|
||||||
|
.iter()
|
||||||
|
.find(|release| {
|
||||||
|
// LTS releases have a non-false "lts" field
|
||||||
|
release["lts"].as_str().is_some() && release["lts"] != false
|
||||||
|
})
|
||||||
|
.ok_or("No LTS releases found")?;
|
||||||
|
|
||||||
|
let version = latest["version"]
|
||||||
|
.as_str()
|
||||||
|
.ok_or("Version not found")?
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
let files = latest["files"]
|
||||||
|
.as_array()
|
||||||
|
.ok_or("Files array not found")?
|
||||||
|
.iter()
|
||||||
|
.map(|f| f.as_str().unwrap_or(""))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let lts_name = latest["lts"].as_str().unwrap_or("unknown");
|
||||||
|
println!("Found Node.js LTS version: {version} ({lts_name})");
|
||||||
|
|
||||||
|
// Map platforms to their expected file keys and full filenames
|
||||||
|
let platform_mapping = vec![
|
||||||
|
(
|
||||||
|
"linux-x64",
|
||||||
|
"linux-x64",
|
||||||
|
format!("node-{version}-linux-x64.tar.xz"),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"linux-arm64",
|
||||||
|
"linux-arm64",
|
||||||
|
format!("node-{version}-linux-arm64.tar.xz"),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"darwin-x64",
|
||||||
|
"osx-x64-tar",
|
||||||
|
format!("node-{version}-darwin-x64.tar.xz"),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"darwin-arm64",
|
||||||
|
"osx-arm64-tar",
|
||||||
|
format!("node-{version}-darwin-arm64.tar.xz"),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"win-x64",
|
||||||
|
"win-x64-zip",
|
||||||
|
format!("node-{version}-win-x64.zip"),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"win-arm64",
|
||||||
|
"win-arm64-zip",
|
||||||
|
format!("node-{version}-win-arm64.zip"),
|
||||||
|
),
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut node_files = Vec::new();
|
||||||
|
|
||||||
|
for (platform, file_key, filename) in platform_mapping {
|
||||||
|
// Check if this file exists in the release
|
||||||
|
if files.contains(&file_key) {
|
||||||
|
let url = format!("https://nodejs.org/dist/{version}/{filename}");
|
||||||
|
node_files.push(NodeFile {
|
||||||
|
filename: filename.clone(),
|
||||||
|
url,
|
||||||
|
});
|
||||||
|
println!("Found file for {platform}: {filename} (key: {file_key})");
|
||||||
|
} else {
|
||||||
|
return Err(
|
||||||
|
format!("File not found for {platform} (key: {file_key}): {filename}").into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(NodeRelease {
|
||||||
|
version,
|
||||||
|
files: node_files,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn generate_node_archive_function(release: &NodeRelease) -> Result<String, Box<dyn Error>> {
|
||||||
|
let client = Client::new();
|
||||||
|
|
||||||
|
// Fetch the SHASUMS256.txt file once
|
||||||
|
println!("Fetching SHA256 checksums...");
|
||||||
|
let shasums_url = format!("https://nodejs.org/dist/{}/SHASUMS256.txt", release.version);
|
||||||
|
let shasums_response = client
|
||||||
|
.get(&shasums_url)
|
||||||
|
.header("User-Agent", "anki-build-updater")
|
||||||
|
.send()?;
|
||||||
|
let shasums_text = shasums_response.text()?;
|
||||||
|
|
||||||
|
// Create a mapping from filename patterns to platform names - using the exact
|
||||||
|
// patterns we stored in files
|
||||||
|
let platform_mapping = vec![
|
||||||
|
("linux-x64.tar.xz", "LinuxX64"),
|
||||||
|
("linux-arm64.tar.xz", "LinuxArm"),
|
||||||
|
("darwin-x64.tar.xz", "MacX64"),
|
||||||
|
("darwin-arm64.tar.xz", "MacArm"),
|
||||||
|
("win-x64.zip", "WindowsX64"),
|
||||||
|
("win-arm64.zip", "WindowsArm"),
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut platform_blocks = Vec::new();
|
||||||
|
|
||||||
|
for (file_pattern, platform_name) in platform_mapping {
|
||||||
|
// Find the file that ends with this pattern
|
||||||
|
if let Some(file) = release
|
||||||
|
.files
|
||||||
|
.iter()
|
||||||
|
.find(|f| f.filename.ends_with(file_pattern))
|
||||||
|
{
|
||||||
|
// Find the SHA256 for this file
|
||||||
|
let sha256 = shasums_text
|
||||||
|
.lines()
|
||||||
|
.find(|line| line.contains(&file.filename))
|
||||||
|
.and_then(|line| line.split_whitespace().next())
|
||||||
|
.ok_or_else(|| format!("SHA256 not found for {}", file.filename))?;
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"Found SHA256 for {}: {} => {}",
|
||||||
|
platform_name, file.filename, sha256
|
||||||
|
);
|
||||||
|
|
||||||
|
let block = format!(
|
||||||
|
" Platform::{} => OnlineArchive {{\n url: \"{}\",\n sha256: \"{}\",\n }},",
|
||||||
|
platform_name, file.url, sha256
|
||||||
|
);
|
||||||
|
platform_blocks.push(block);
|
||||||
|
} else {
|
||||||
|
return Err(format!(
|
||||||
|
"File not found for platform {platform_name}: no file ending with {file_pattern}"
|
||||||
|
)
|
||||||
|
.into());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let function = format!(
|
||||||
|
"pub fn node_archive(platform: Platform) -> OnlineArchive {{\n match platform {{\n{}\n }}\n}}",
|
||||||
|
platform_blocks.join("\n")
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(function)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn update_node_text(new_function: &str) -> Result<(), Box<dyn Error>> {
|
||||||
|
let node_rs_content = read_node_rs()?;
|
||||||
|
|
||||||
|
// Regex to match the entire node_archive function with proper multiline
|
||||||
|
// matching
|
||||||
|
let re = Regex::new(
|
||||||
|
r"(?s)pub fn node_archive\(platform: Platform\) -> OnlineArchive \{.*?\n\s*\}\s*\n\s*\}",
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let updated_content = re.replace(&node_rs_content, new_function);
|
||||||
|
|
||||||
|
write_node_rs(&updated_content)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_node_rs() -> Result<String, Box<dyn Error>> {
|
||||||
|
// Use CARGO_MANIFEST_DIR to get the crate root, then find src/node.rs
|
||||||
|
let manifest_dir =
|
||||||
|
std::env::var("CARGO_MANIFEST_DIR").map_err(|_| "CARGO_MANIFEST_DIR not set")?;
|
||||||
|
let path = Path::new(&manifest_dir).join("src").join("node.rs");
|
||||||
|
Ok(fs::read_to_string(path)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_node_rs(content: &str) -> Result<(), Box<dyn Error>> {
|
||||||
|
// Use CARGO_MANIFEST_DIR to get the crate root, then find src/node.rs
|
||||||
|
let manifest_dir =
|
||||||
|
std::env::var("CARGO_MANIFEST_DIR").map_err(|_| "CARGO_MANIFEST_DIR not set")?;
|
||||||
|
let path = Path::new(&manifest_dir).join("src").join("node.rs");
|
||||||
|
fs::write(path, content)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_regex_replacement() {
|
||||||
|
let sample_content = r#"Some other code
|
||||||
|
pub fn node_archive(platform: Platform) -> OnlineArchive {
|
||||||
|
match platform {
|
||||||
|
Platform::LinuxX64 => OnlineArchive {
|
||||||
|
url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-linux-x64.tar.xz",
|
||||||
|
sha256: "old_hash",
|
||||||
|
},
|
||||||
|
Platform::MacX64 => OnlineArchive {
|
||||||
|
url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-darwin-x64.tar.xz",
|
||||||
|
sha256: "old_hash",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
More code here"#;
|
||||||
|
|
||||||
|
let new_function = r#"pub fn node_archive(platform: Platform) -> OnlineArchive {
|
||||||
|
match platform {
|
||||||
|
Platform::LinuxX64 => OnlineArchive {
|
||||||
|
url: "https://nodejs.org/dist/v21.0.0/node-v21.0.0-linux-x64.tar.xz",
|
||||||
|
sha256: "new_hash",
|
||||||
|
},
|
||||||
|
Platform::MacX64 => OnlineArchive {
|
||||||
|
url: "https://nodejs.org/dist/v21.0.0/node-v21.0.0-darwin-x64.tar.xz",
|
||||||
|
sha256: "new_hash",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}"#;
|
||||||
|
|
||||||
|
let re = Regex::new(
|
||||||
|
r"(?s)pub fn node_archive\(platform: Platform\) -> OnlineArchive \{.*?\n\s*\}\s*\n\s*\}"
|
||||||
|
).unwrap();
|
||||||
|
|
||||||
|
let result = re.replace(sample_content, new_function);
|
||||||
|
assert!(result.contains("v21.0.0"));
|
||||||
|
assert!(result.contains("new_hash"));
|
||||||
|
assert!(!result.contains("old_hash"));
|
||||||
|
assert!(result.contains("Some other code"));
|
||||||
|
assert!(result.contains("More code here"));
|
||||||
|
}
|
||||||
|
}
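A note on the pattern above: the `(?s)` flag lets `.` match newlines, and the lazy `.*?` combined with the trailing `\n\s*\}\s*\n\s*\}` anchors the match at the function's two closing braces, so code that follows `node_archive` is never swallowed. A minimal standalone sketch of that behaviour (assumes only the `regex` crate; the sample strings are hypothetical):

```rust
// Sketch: why the lazy match stops at the right place.
use regex::Regex;

fn main() {
    let re = Regex::new(
        r"(?s)pub fn node_archive\(platform: Platform\) -> OnlineArchive \{.*?\n\s*\}\s*\n\s*\}",
    )
    .unwrap();
    let src = "pub fn node_archive(platform: Platform) -> OnlineArchive {\n    match platform {\n    }\n}\n\nfn untouched() {}";
    let out = re.replace(src, "/* regenerated */");
    // Only the generated function is replaced; trailing code survives.
    assert!(out.contains("fn untouched"));
}
```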
build/ninja_gen/src/bin/update_protoc.rs (new file, 125 lines)
@@ -0,0 +1,125 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use std::error::Error;
use std::fs;
use std::path::Path;

use regex::Regex;
use reqwest::blocking::Client;
use serde_json::Value;
use sha2::Digest;
use sha2::Sha256;

fn fetch_protoc_release_info() -> Result<String, Box<dyn Error>> {
    let client = Client::new();

    println!("Fetching latest protoc release info from GitHub...");
    // Fetch latest release info
    let response = client
        .get("https://api.github.com/repos/protocolbuffers/protobuf/releases/latest")
        .header("User-Agent", "Anki-Build-Script")
        .send()?;

    let release_info: Value = response.json()?;
    let assets = release_info["assets"]
        .as_array()
        .expect("assets should be an array");

    // Map platform names to their corresponding asset patterns
    let platform_patterns = [
        ("LinuxX64", "linux-x86_64"),
        ("LinuxArm", "linux-aarch_64"),
        ("MacX64", "osx-universal_binary"), // Mac uses universal binary for both
        ("MacArm", "osx-universal_binary"),
        ("WindowsX64", "win64"), // Windows uses x86 binary for both archs
        ("WindowsArm", "win64"),
    ];

    let mut match_blocks = Vec::new();

    for (platform, pattern) in platform_patterns {
        // Find the asset matching the platform pattern
        let asset = assets.iter().find(|asset| {
            let name = asset["name"].as_str().unwrap_or("");
            name.starts_with("protoc-") && name.contains(pattern) && name.ends_with(".zip")
        });

        if asset.is_none() {
            eprintln!("No asset found for platform {platform} pattern {pattern}");
            continue;
        }

        let asset = asset.unwrap();
        let download_url = asset["browser_download_url"].as_str().unwrap();
        let asset_name = asset["name"].as_str().unwrap();

        // Download the file and calculate SHA256 locally
        println!("Downloading and checksumming {asset_name} for {platform}...");
        let response = client
            .get(download_url)
            .header("User-Agent", "Anki-Build-Script")
            .send()?;

        let bytes = response.bytes()?;
        let mut hasher = Sha256::new();
        hasher.update(&bytes);
        let sha256 = format!("{:x}", hasher.finalize());

        // Handle platform-specific match patterns
        let match_pattern = match platform {
            "MacX64" => "Platform::MacX64 | Platform::MacArm",
            "MacArm" => continue, // Skip MacArm since it's handled with MacX64
            "WindowsX64" => "Platform::WindowsX64 | Platform::WindowsArm",
            "WindowsArm" => continue, // Skip WindowsArm since it's handled with WindowsX64
            _ => &format!("Platform::{platform}"),
        };

        match_blocks.push(format!(
            " {match_pattern} => {{\n OnlineArchive {{\n url: \"{download_url}\",\n sha256: \"{sha256}\",\n }}\n }}"
        ));
    }

    Ok(format!(
        "pub fn protoc_archive(platform: Platform) -> OnlineArchive {{\n match platform {{\n{}\n }}\n}}",
        match_blocks.join(",\n")
    ))
}

fn read_protobuf_rs() -> Result<String, Box<dyn Error>> {
    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".to_string());
    let path = Path::new(&manifest_dir).join("src/protobuf.rs");
    println!("Reading {}", path.display());
    let content = fs::read_to_string(path)?;
    Ok(content)
}

fn update_protoc_text(old_text: &str, new_protoc_text: &str) -> Result<String, Box<dyn Error>> {
    let re =
        Regex::new(r"(?ms)^pub fn protoc_archive\(platform: Platform\) -> OnlineArchive \{.*?\n\}")
            .unwrap();
    if !re.is_match(old_text) {
        return Err("Could not find protoc_archive function block to replace".into());
    }
    let new_content = re.replace(old_text, new_protoc_text).to_string();
    println!("Original lines: {}", old_text.lines().count());
    println!("Updated lines: {}", new_content.lines().count());
    Ok(new_content)
}

fn write_protobuf_rs(content: &str) -> Result<(), Box<dyn Error>> {
    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".to_string());
    let path = Path::new(&manifest_dir).join("src/protobuf.rs");
    println!("Writing to {}", path.display());
    fs::write(path, content)?;
    Ok(())
}

fn main() -> Result<(), Box<dyn Error>> {
    let new_protoc_archive = fetch_protoc_release_info()?;
    let content = read_protobuf_rs()?;
    let updated_content = update_protoc_text(&content, &new_protoc_archive)?;
    write_protobuf_rs(&updated_content)?;
    println!("Successfully updated protoc_archive function in protobuf.rs");
    Ok(())
}
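The same in-memory hashing pattern can be reused to spot-check one of the pinned archives by hand. A minimal sketch (assumes the `reqwest` blocking feature and `sha2`, the same crates the updater uses; the URL and digest below are taken from the protoc table later in this diff):

```rust
// Sketch: recompute a pinned archive's SHA-256 and compare it to the
// value recorded in the generated table.
use sha2::{Digest, Sha256};

fn verify(bytes: &[u8], expected: &str) -> bool {
    let mut hasher = Sha256::new();
    hasher.update(bytes);
    format!("{:x}", hasher.finalize()) == expected
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let url = "https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip";
    let bytes = reqwest::blocking::get(url)?.bytes()?;
    println!(
        "ok: {}",
        verify(&bytes, "96553041f1a91ea0efee963cb16f462f5985b4d65365f3907414c360044d8065")
    );
    Ok(())
}
```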
@@ -53,7 +53,7 @@ fn fetch_uv_release_info() -> Result<String, Box<dyn Error>> {
         // Find the corresponding .sha256 or .sha256sum asset
         let sha_asset = assets.iter().find(|a| {
             let name = a["name"].as_str().unwrap_or("");
-            name == format!("{}.sha256", asset_name) || name == format!("{}.sha256sum", asset_name)
+            name == format!("{asset_name}.sha256") || name == format!("{asset_name}.sha256sum")
         });
         if sha_asset.is_none() {
             eprintln!("No sha256 asset found for {asset_name}");
@@ -71,8 +71,7 @@ fn fetch_uv_release_info() -> Result<String, Box<dyn Error>> {
         let sha256 = sha_text.split_whitespace().next().unwrap_or("");

         match_blocks.push(format!(
-            " Platform::{} => {{\n OnlineArchive {{\n url: \"{}\",\n sha256: \"{}\",\n }}\n }}",
-            platform, download_url, sha256
+            " Platform::{platform} => {{\n OnlineArchive {{\n url: \"{download_url}\",\n sha256: \"{sha256}\",\n }}\n }}"
         ));
     }
@@ -135,10 +134,7 @@ mod tests {
         assert_eq!(
             updated_lines,
             original_lines - EXPECTED_LINES_REMOVED,
-            "Expected line count to decrease by exactly {} lines (original: {}, updated: {})",
-            EXPECTED_LINES_REMOVED,
-            original_lines,
-            updated_lines
+            "Expected line count to decrease by exactly {EXPECTED_LINES_REMOVED} lines (original: {original_lines}, updated: {updated_lines})"
         );
     }
 }
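For context, the `.sha256`/`.sha256sum` sidecar files published with uv releases contain the digest followed by the file name, which is why only the first whitespace-separated token is kept above. A tiny illustration (the digest below is a made-up placeholder):

```rust
// Sketch: extracting the digest from a "<hex>  <filename>" sidecar line.
fn parse_sha_sidecar(text: &str) -> &str {
    text.split_whitespace().next().unwrap_or("")
}

fn main() {
    let line = "0123abcd  uv-x86_64-unknown-linux-gnu.tar.gz"; // placeholder digest
    assert_eq!(parse_sha_sidecar(line), "0123abcd");
}
```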
@@ -300,7 +300,7 @@ impl BuildStatement<'_> {

         writeln!(buf, "build {outputs_str}: {action_name} {inputs_str}").unwrap();
         for (key, value) in self.variables.iter().sorted() {
-            writeln!(buf, " {key} = {}", value).unwrap();
+            writeln!(buf, " {key} = {value}").unwrap();
         }
         writeln!(buf).unwrap();
@@ -476,7 +476,7 @@ impl FilesHandle for BuildStatement<'_> {
         let outputs = outputs.into_iter().map(|v| {
             let v = v.as_ref();
             let v = if !v.starts_with("$builddir/") && !v.starts_with("$builddir\\") {
-                format!("$builddir/{}", v)
+                format!("$builddir/{v}")
             } else {
                 v.to_owned()
             };
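The prefixing rule above can be read in isolation: any output path not already rooted at `$builddir` (with either path separator) gets the prefix prepended. A self-contained sketch of that rule:

```rust
// Sketch of the output-path rule used by BuildStatement above.
fn normalize(v: &str) -> String {
    if !v.starts_with("$builddir/") && !v.starts_with("$builddir\\") {
        format!("$builddir/{v}")
    } else {
        v.to_owned()
    }
}

fn main() {
    assert_eq!(normalize("tests/foo"), "$builddir/tests/foo");
    assert_eq!(normalize("$builddir/tests/foo"), "$builddir/tests/foo");
}
```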
@@ -162,7 +162,7 @@ impl BuildAction for CargoTest {
             "cargo-nextest",
             CargoInstall {
                 binary_name: "cargo-nextest",
-                args: "cargo-nextest --version 0.9.57 --locked",
+                args: "cargo-nextest --version 0.9.99 --locked --no-default-features --features default-no-update",
             },
         )?;
         setup_flags(build)
@@ -19,28 +19,28 @@ use crate::input::BuildInput;
 pub fn node_archive(platform: Platform) -> OnlineArchive {
     match platform {
         Platform::LinuxX64 => OnlineArchive {
-            url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-linux-x64.tar.xz",
-            sha256: "822780369d0ea309e7d218e41debbd1a03f8cdf354ebf8a4420e89f39cc2e612",
+            url: "https://nodejs.org/dist/v22.17.0/node-v22.17.0-linux-x64.tar.xz",
+            sha256: "325c0f1261e0c61bcae369a1274028e9cfb7ab7949c05512c5b1e630f7e80e12",
         },
         Platform::LinuxArm => OnlineArchive {
-            url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-linux-arm64.tar.xz",
-            sha256: "f6df68c6793244071f69023a9b43a0cf0b13d65cbe86d55925c28e4134d9aafb",
+            url: "https://nodejs.org/dist/v22.17.0/node-v22.17.0-linux-arm64.tar.xz",
+            sha256: "140aee84be6774f5fb3f404be72adbe8420b523f824de82daeb5ab218dab7b18",
         },
         Platform::MacX64 => OnlineArchive {
-            url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-darwin-x64.tar.xz",
-            sha256: "d4b4ab81ebf1f7aab09714f834992f27270ad0079600da00c8110f8950ca6c5a",
+            url: "https://nodejs.org/dist/v22.17.0/node-v22.17.0-darwin-x64.tar.xz",
+            sha256: "f79de1f64df4ac68493a344bb5ab7d289d0275271e87b543d1278392c9de778a",
         },
         Platform::MacArm => OnlineArchive {
-            url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-darwin-arm64.tar.xz",
-            sha256: "f18a7438723d48417f5e9be211a2f3c0520ffbf8e02703469e5153137ca0f328",
+            url: "https://nodejs.org/dist/v22.17.0/node-v22.17.0-darwin-arm64.tar.xz",
+            sha256: "cc9cc294eaf782dd93c8c51f460da610cc35753c6a9947411731524d16e97914",
         },
         Platform::WindowsX64 => OnlineArchive {
-            url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-win-x64.zip",
-            sha256: "893115cd92ad27bf178802f15247115e93c0ef0c753b93dca96439240d64feb5",
+            url: "https://nodejs.org/dist/v22.17.0/node-v22.17.0-win-x64.zip",
+            sha256: "721ab118a3aac8584348b132767eadf51379e0616f0db802cc1e66d7f0d98f85",
         },
         Platform::WindowsArm => OnlineArchive {
-            url: "https://nodejs.org/dist/v20.11.0/node-v20.11.0-win-arm64.zip",
-            sha256: "89c1f7034dcd6ff5c17f2af61232a96162a1902f862078347dcf274a938b6142",
+            url: "https://nodejs.org/dist/v22.17.0/node-v22.17.0-win-arm64.zip",
+            sha256: "78355dc9ca117bb71d3f081e4b1b281855e2b134f3939bb0ca314f7567b0e621",
         },
     }
 }
@@ -98,7 +98,7 @@ impl BuildAction for YarnInstall<'_> {
     }
 }

-fn with_cmd_ext(bin: &str) -> Cow<str> {
+fn with_cmd_ext(bin: &str) -> Cow<'_, str> {
     if cfg!(windows) {
         format!("{bin}.cmd").into()
     } else {
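The new `Cow<'_, str>` return type just makes the elided lifetime explicit (the rust-2018-idioms style); the function still borrows on non-Windows and allocates only when appending `.cmd`. A sketch with the else branch filled in (the `bin.into()` body is an assumption, since the hunk cuts off there):

```rust
use std::borrow::Cow;

// Sketch: borrowed on non-Windows, owned "{bin}.cmd" on Windows.
fn with_cmd_ext(bin: &str) -> Cow<'_, str> {
    if cfg!(windows) {
        format!("{bin}.cmd").into() // owned variant
    } else {
        bin.into() // borrowed variant, tied to `bin` via '_
    }
}

fn main() {
    println!("{}", with_cmd_ext("yarn"));
}
```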
@@ -21,26 +21,26 @@ pub fn protoc_archive(platform: Platform) -> OnlineArchive {
     match platform {
         Platform::LinuxX64 => {
             OnlineArchive {
-                url: "https://github.com/protocolbuffers/protobuf/releases/download/v21.8/protoc-21.8-linux-x86_64.zip",
-                sha256: "f90d0dd59065fef94374745627336d622702b67f0319f96cee894d41a974d47a",
+                url: "https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip",
+                sha256: "96553041f1a91ea0efee963cb16f462f5985b4d65365f3907414c360044d8065",
             }
-        }
+        },
         Platform::LinuxArm => {
             OnlineArchive {
-                url: "https://github.com/protocolbuffers/protobuf/releases/download/v21.8/protoc-21.8-linux-aarch_64.zip",
-                sha256: "f3d8eb5839d6186392d8c7b54fbeabbb6fcdd90618a500b77cb2e24faa245cad",
+                url: "https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-aarch_64.zip",
+                sha256: "6c554de11cea04c56ebf8e45b54434019b1cd85223d4bbd25c282425e306ecc2",
             }
-        }
+        },
         Platform::MacX64 | Platform::MacArm => {
             OnlineArchive {
-                url: "https://github.com/protocolbuffers/protobuf/releases/download/v21.8/protoc-21.8-osx-universal_binary.zip",
-                sha256: "e3324d3bc2e9bc967a0bec2472e0ec73b26f952c7c87f2403197414f780c3c6c",
+                url: "https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-osx-universal_binary.zip",
+                sha256: "99ea004549c139f46da5638187a85bbe422d78939be0fa01af1aa8ab672e395f",
             }
-        }
+        },
         Platform::WindowsX64 | Platform::WindowsArm => {
             OnlineArchive {
-                url: "https://github.com/protocolbuffers/protobuf/releases/download/v21.8/protoc-21.8-win64.zip",
-                sha256: "3657053024faa439ff5f8c1dd2ee06bac0f9b9a3d660e99944f015a7451e87ec",
+                url: "https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-win64.zip",
+                sha256: "70381b116ab0d71cb6a5177d9b17c7c13415866603a0fd40d513dafe32d56c35",
             }
         }
     }
 }
@@ -148,7 +148,7 @@ impl BuildAction for PythonEnvironment {
         // Add --python flag to extra_args if PYTHON_BINARY is set
         let mut args = self.extra_args.to_string();
         if let Ok(python_binary) = env::var("PYTHON_BINARY") {
-            args = format!("--python {} {}", python_binary, args);
+            args = format!("--python {python_binary} {args}");
         }
         build.add_variable("extra_args", args);
     }
@@ -159,6 +159,10 @@ impl BuildAction for PythonEnvironment {
         }
         build.add_output_stamp(format!("{}/.stamp", self.venv_folder));
     }
+
+    fn check_output_timestamps(&self) -> bool {
+        true
+    }
 }

 pub struct PythonTypecheck {
@@ -189,31 +193,19 @@ impl BuildAction for PythonTypecheck {
 struct PythonFormat<'a> {
     pub inputs: &'a BuildInput,
     pub check_only: bool,
-    pub isort_ini: &'a BuildInput,
 }

 impl BuildAction for PythonFormat<'_> {
     fn command(&self) -> &str {
-        "$black -t py39 -q $check --color $in && $
-            $isort --color --settings-path $isort_ini $check $in"
+        "$ruff format $mode $in && $ruff check --select I --fix $in"
     }

     fn files(&mut self, build: &mut impl crate::build::FilesHandle) {
         build.add_inputs("in", self.inputs);
-        build.add_inputs("black", inputs![":pyenv:black"]);
-        build.add_inputs("isort", inputs![":pyenv:isort"]);
+        build.add_inputs("ruff", inputs![":pyenv:ruff"]);

         let hash = simple_hash(self.inputs);
-        build.add_env_var("BLACK_CACHE_DIR", "out/python/black.cache.{hash}");
-        build.add_inputs("isort_ini", self.isort_ini);
-        build.add_variable(
-            "check",
-            if self.check_only {
-                "--diff --check"
-            } else {
-                ""
-            },
-        );
+        build.add_variable("mode", if self.check_only { "--check" } else { "" });

         build.add_output_stamp(format!(
             "tests/python_format.{}.{hash}",
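Ninja substitutes `$ruff`, `$mode`, and `$in` when it renders the build statement; `$mode` is `--check` for the check action and empty for the fix action, so both actions share one command template. A rough illustration of the expansion (ninja performs the real substitution; the file name here is a placeholder):

```rust
// Sketch: how the shared template expands for check vs. fix runs.
fn render(template: &str, mode: &str, inputs: &str) -> String {
    template
        .replace("$ruff", "ruff")
        .replace("$mode", mode)
        .replace("$in", inputs)
}

fn main() {
    let template = "$ruff format $mode $in && $ruff check --select I --fix $in";
    println!("{}", render(template, "--check", "pylib/anki/collection.py"));
    println!("{}", render(template, "", "pylib/anki/collection.py"));
}
```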
@@ -223,13 +215,11 @@ impl BuildAction for PythonFormat<'_> {
     }
 }

 pub fn python_format(build: &mut Build, group: &str, inputs: BuildInput) -> Result<()> {
-    let isort_ini = &inputs![".isort.cfg"];
     build.add_action(
         format!("check:format:python:{group}"),
         PythonFormat {
             inputs: &inputs,
             check_only: true,
-            isort_ini,
         },
     )?;
@@ -238,34 +228,39 @@ pub fn python_format(build: &mut Build, group: &str, inputs: BuildInput) -> Result<()> {
         PythonFormat {
             inputs: &inputs,
             check_only: false,
-            isort_ini,
         },
     )?;
     Ok(())
 }

-pub struct PythonLint {
+pub struct RuffCheck {
     pub folders: &'static [&'static str],
-    pub pylint_ini: BuildInput,
     pub deps: BuildInput,
+    pub check_only: bool,
 }

-impl BuildAction for PythonLint {
+impl BuildAction for RuffCheck {
     fn command(&self) -> &str {
-        "$pylint --rcfile $pylint_ini -sn -j $cpus $folders"
+        "$ruff check $folders $mode"
     }

     fn files(&mut self, build: &mut impl crate::build::FilesHandle) {
         build.add_inputs("", &self.deps);
-        build.add_inputs("pylint", inputs![":pyenv:pylint"]);
-        build.add_inputs("pylint_ini", &self.pylint_ini);
+        build.add_inputs("", inputs![".ruff.toml"]);
+        build.add_inputs("ruff", inputs![":pyenv:ruff"]);
         build.add_variable("folders", self.folders.join(" "));
-        // On a 16 core system, values above 10 do not improve wall clock time,
-        // but waste extra cores that could be working on other tests.
-        build.add_variable("cpus", num_cpus::get().min(10).to_string());
+        build.add_variable(
+            "mode",
+            if self.check_only {
+                ""
+            } else {
+                "--fix --unsafe-fixes"
+            },
+        );

         let hash = simple_hash(&self.deps);
-        build.add_output_stamp(format!("tests/python_lint.{hash}"));
+        let kind = if self.check_only { "check" } else { "fix" };
+        build.add_output_stamp(format!("tests/python_ruff.{kind}.{hash}"));
     }
 }
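For illustration, the check and fix variants could be registered the same way `python_format` registers its pair of actions. A hypothetical sketch (the `ruff_check` helper, group names, and folder list are assumptions, and `BuildInput` is assumed to be cloneable; this wiring is not part of the diff):

```rust
// Hypothetical wiring, mirroring python_format above.
fn ruff_check(build: &mut Build, deps: BuildInput) -> Result<()> {
    build.add_action(
        "check:ruff".to_string(),
        RuffCheck { folders: &["pylib", "qt"], deps: deps.clone(), check_only: true },
    )?;
    build.add_action(
        "fix:ruff".to_string(),
        RuffCheck { folders: &["pylib", "qt"], deps, check_only: false },
    )?;
    Ok(())
}
```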
@@ -30,12 +30,12 @@ impl Build {
         )
         .unwrap();
         for (key, value) in &self.variables {
-            writeln!(&mut buf, "{} = {}", key, value).unwrap();
+            writeln!(&mut buf, "{key} = {value}").unwrap();
         }
         buf.push('\n');

         for (key, value) in &self.pools {
-            writeln!(&mut buf, "pool {}\n depth = {}", key, value).unwrap();
+            writeln!(&mut buf, "pool {key}\n depth = {value}").unwrap();
         }
         buf.push('\n');
@@ -65,7 +65,7 @@ fn sha2_data(data: &[u8]) -> String {
     let mut digest = sha2::Sha256::new();
     digest.update(data);
     let result = digest.finalize();
-    format!("{:x}", result)
+    format!("{result:x}")
 }

 enum CompressionKind {
@@ -67,7 +67,10 @@ pub fn run_build(args: BuildArgs) {
             "MYPY_CACHE_DIR",
             build_root.join("tests").join("mypy").into_string(),
         )
-        .env("PYTHONPYCACHEPREFIX", build_root.join("pycache"))
+        .env(
+            "PYTHONPYCACHEPREFIX",
+            std::path::absolute(build_root.join("pycache")).unwrap(),
+        )
         // commands will not show colors by default, as we do not provide a tty
         .env("FORCE_COLOR", "1")
         .env("MYPY_FORCE_COLOR", "1")
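The switch to `std::path::absolute` matters because `PYTHONPYCACHEPREFIX` must stay valid once commands run from other working directories; unlike `canonicalize`, `std::path::absolute` neither touches the filesystem nor resolves symlinks, so it works even before the directory exists. A tiny sketch:

```rust
// Sketch: absolute() resolves against the current dir without requiring
// the path to exist (canonicalize would fail on a missing path).
fn main() {
    let p = std::path::absolute("out/pycache").unwrap();
    assert!(p.is_absolute());
    println!("{}", p.display());
}
```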
@@ -135,7 +138,7 @@ fn setup_build_root() -> Utf8PathBuf {
         true
     };
     if create {
-        println!("Switching build root to {}", new_target);
+        println!("Switching build root to {new_target}");
         std::os::unix::fs::symlink(new_target, build_root).unwrap();
     }
 }
@@ -32,10 +32,19 @@ pub fn setup_pyenv(args: PyenvArgs) {
         }
     }

+    let mut command = Command::new(args.uv_bin);
+
+    // remove UV_* environment variables to avoid interference
+    for (key, _) in std::env::vars() {
+        if key.starts_with("UV_") || key == "VIRTUAL_ENV" {
+            command.env_remove(key);
+        }
+    }
+
     run_command(
-        Command::new(args.uv_bin)
+        command
             .env("UV_PROJECT_ENVIRONMENT", args.pyenv_folder.clone())
-            .args(["sync", "--frozen"])
+            .args(["sync", "--locked", "--no-config"])
             .args(args.extra_args),
     );
@@ -1,7 +1,6 @@
 // Copyright: Ankitects Pty Ltd and contributors
 // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

-use std::io::ErrorKind;
 use std::process::Command;

 use anki_io::create_dir_all;
@@ -44,7 +43,7 @@ fn split_env(s: &str) -> Result<(String, String), std::io::Error> {
     if let Some((k, v)) = s.split_once('=') {
         Ok((k.into(), v.into()))
     } else {
-        Err(std::io::Error::new(ErrorKind::Other, "invalid env var"))
+        Err(std::io::Error::other("invalid env var"))
     }
 }
@@ -84,7 +83,7 @@ fn split_args(args: Vec<String>) -> Vec<Vec<String>> {

 pub fn run_command(command: &mut Command) {
     if let Err(err) = command.ensure_success() {
-        println!("{}", err);
+        println!("{err}");
         std::process::exit(1);
     }
 }
@@ -28,7 +28,11 @@ pub fn setup_yarn(args: YarnArgs) {
                 .arg("--ignore-scripts"),
         );
     } else {
-        run_command(Command::new(&args.yarn_bin).arg("install"));
+        run_command(
+            Command::new(&args.yarn_bin)
+                .arg("install")
+                .arg("--immutable"),
+        );
     }

     std::fs::write(args.stamp, b"").unwrap();
cargo/licenses.json (8510 lines changed; diff suppressed because it is too large)
@@ -85,7 +85,7 @@ When formatting issues are reported, they can be fixed with
 ./ninja format
 ```

-## Fixing eslint/copyright header issues
+## Fixing ruff/eslint/copyright header issues

 ```
 ./ninja fix
@@ -190,13 +190,10 @@ in the collection2.log file will also be printed on stdout.

 If ANKI_PROFILE_CODE is set, Python profiling data will be written on exit.

-# Binary Bundles
+# Installer/launcher

-Anki's official binary packages are created with `./ninja bundle`. The bundling
-process was created specifically for the official builds, and is provided as-is;
-we are unfortunately not able to provide assistance with any issues you may run
-into when using it. You'll need to run
-`git submodule update --checkout qt/bundle/PyOxidizer` first.
+- The anki-release package is created/published with the scripts in qt/release.
+- The installer/launcher is created with the build scripts in qt/launcher/{platform}.

 ## Mixing development and study
@@ -1,35 +1,78 @@
-# This Dockerfile uses three stages.
-# 1. Compile anki (and dependencies) and build python wheels.
-# 2. Create a virtual environment containing anki and its dependencies.
-# 3. Create a final image that only includes anki's virtual environment and required
-#    system packages.
-
-ARG PYTHON_VERSION="3.9"
+# This is a user-contributed Dockerfile. No official support is available.
+
 ARG DEBIAN_FRONTEND="noninteractive"

-# Build anki.
-FROM python:$PYTHON_VERSION AS build
-RUN curl -fsSL https://github.com/bazelbuild/bazelisk/releases/download/v1.7.4/bazelisk-linux-amd64 \
-    > /usr/local/bin/bazel \
-    && chmod +x /usr/local/bin/bazel \
-    # Bazel expects /usr/bin/python
-    && ln -s /usr/local/bin/python /usr/bin/python
+FROM ubuntu:24.04 AS build
 WORKDIR /opt/anki
+ENV PYTHON_VERSION="3.13"
+
+# System deps
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    curl \
+    git \
+    build-essential \
+    pkg-config \
+    libssl-dev \
+    libbz2-dev \
+    libreadline-dev \
+    libsqlite3-dev \
+    libffi-dev \
+    zlib1g-dev \
+    liblzma-dev \
+    ca-certificates \
+    ninja-build \
+    rsync \
+    libglib2.0-0 \
+    libgl1 \
+    libx11-6 \
+    libxext6 \
+    libxrender1 \
+    libxkbcommon0 \
+    libxkbcommon-x11-0 \
+    libxcb1 \
+    libxcb-render0 \
+    libxcb-shm0 \
+    libxcb-icccm4 \
+    libxcb-image0 \
+    libxcb-keysyms1 \
+    libxcb-randr0 \
+    libxcb-shape0 \
+    libxcb-xfixes0 \
+    libxcb-xinerama0 \
+    libxcb-xinput0 \
+    libsm6 \
+    libice6 \
+    && rm -rf /var/lib/apt/lists/*
+
+# install rust with rustup
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+# Install uv and Python 3.13 with uv
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
+    && ln -s /root/.local/bin/uv /usr/local/bin/uv
+ENV PATH="/root/.local/bin:${PATH}"
+
+RUN uv python install ${PYTHON_VERSION} --default
+
 COPY . .
-# Build python wheels.
 RUN ./tools/build


 # Install pre-compiled Anki.
-FROM python:${PYTHON_VERSION}-slim as installer
+FROM python:3.13-slim AS installer
 WORKDIR /opt/anki/
-COPY --from=build /opt/anki/wheels/ wheels/
+COPY --from=build /opt/anki/out/wheels/ wheels/
 # Use virtual environment.
 RUN python -m venv venv \
     && ./venv/bin/python -m pip install --no-cache-dir setuptools wheel \
     && ./venv/bin/python -m pip install --no-cache-dir /opt/anki/wheels/*.whl


 # We use another build stage here so we don't include the wheels in the final image.
-FROM python:${PYTHON_VERSION}-slim as final
+FROM python:3.13-slim AS final
 COPY --from=installer /opt/anki/venv /opt/anki/venv
 ENV PATH=/opt/anki/venv/bin:$PATH
 # Install run-time dependencies.
@@ -59,9 +102,9 @@ RUN apt-get update \
     libxrender1 \
     libxtst6 \
     && rm -rf /var/lib/apt/lists/*

 # Add non-root user.
 RUN useradd --create-home anki
 USER anki
 WORKDIR /work
 ENTRYPOINT ["/opt/anki/venv/bin/anki"]
-LABEL maintainer="Jakub Kaczmarzyk <jakub.kaczmarzyk@gmail.com>"
@@ -51,13 +51,8 @@ Anki requires a recent glibc.

 If you are using a distro that uses musl, Anki will not work.

-If your glibc version is 2.35+ on AMD64 or 2.39+ on ARM64, you can skip the rest of this section.
-
-If your system has an older glibc, you won't be able to use the PyQt wheels that are
-available in pip/PyPy, and will need to use your system-installed PyQt instead.
-Your distro will also need to have Python 3.9 or later.
-
-After installing the system libraries (eg:
+You can use your system's Qt libraries if they are Qt 6.2 or later, if
+you wish. After installing the system libraries (eg:
 'sudo apt install python3-pyqt6.qt{quick,webengine} python3-venv pyqt6-dev-tools'),
 find the place they are installed (eg '/usr/lib/python3/dist-packages'). On modern Ubuntu, you'll
 also need 'sudo apt remove python3-protobuf'. Then before running any commands like './run', tell Anki where
@@ -68,12 +63,6 @@ export PYTHONPATH=/usr/lib/python3/dist-packages
 export PYTHON_BINARY=/usr/bin/python3
 ```

-There are a few things to be aware of:
-
-- You should use ./run and not tools/run-qt5\*, even if your system libraries are Qt5.
-- If your system libraries are Qt5, when creating an aqt wheel, the wheel will not work
-  on Qt6 environments.
-
 ## Packaging considerations

 Python, node and protoc are downloaded as part of the build. You can optionally define
@@ -98,12 +98,6 @@ should preferably be assigned a number between 1 and 15. If a message contains

 Protobuf has an official Python implementation with an extensive [reference](https://developers.google.com/protocol-buffers/docs/reference/python-generated).

-- Every message used in aqt or pylib must be added to the respective `.pylintrc`
-  to avoid failing type checks. The unqualified protobuf message's name must be
-  used, not an alias from `collection.py` for example. This should be taken into
-  account when choosing a message name in order to prevent skipping typechecking
-  a Python class of the same name.
-
 ### Typescript

 Anki uses [protobuf-es](https://github.com/bufbuild/protobuf-es), which offers
@@ -1 +1 @@
-Subproject commit 78412ce163d4dc50dd82f5b27cde3119086a2eb7
+Subproject commit 480ef0da728c7ea3485c58529ae7ee02be3e5dba
@@ -60,7 +60,6 @@ card-templates-this-will-create-card-proceed =
     }
 card-templates-type-boxes-warning = Only one typing box per card template is supported.
 card-templates-restore-to-default = Restore to Default
-card-templates-restore-to-default-confirmation = This will reset all fields and templates in this note type to their default
-    values, removing any extra fields/templates and their content, and any custom styling. Do you wish to proceed?
+card-templates-restore-to-default-confirmation = This will reset all fields and templates in this note type to their default values, removing any extra fields/templates and their content, and any custom styling. Do you wish to proceed?
 card-templates-restored-to-default = Note type has been restored to its original state.
|
@ -5,6 +5,11 @@ database-check-card-properties =
|
||||||
[one] Fixed { $count } invalid card property.
|
[one] Fixed { $count } invalid card property.
|
||||||
*[other] Fixed { $count } invalid card properties.
|
*[other] Fixed { $count } invalid card properties.
|
||||||
}
|
}
|
||||||
|
database-check-card-last-review-time-empty =
|
||||||
|
{ $count ->
|
||||||
|
[one] Added last review time to { $count } card.
|
||||||
|
*[other] Added last review time to { $count } cards.
|
||||||
|
}
|
||||||
database-check-missing-templates =
|
database-check-missing-templates =
|
||||||
{ $count ->
|
{ $count ->
|
||||||
[one] Deleted { $count } card with missing template.
|
[one] Deleted { $count } card with missing template.
|
||||||
|
|
|
@@ -307,7 +307,8 @@ deck-config-new-interval-tooltip = The multiplier applied to a review interval w
 deck-config-minimum-interval-tooltip = The minimum interval given to a review card after answering `Again`.
 deck-config-custom-scheduling = Custom scheduling
 deck-config-custom-scheduling-tooltip = Affects the entire collection. Use at your own risk!
-# Easy Days section
+
+## Easy Days section.

 deck-config-easy-days-title = Easy Days
 deck-config-easy-days-monday = Mon
@@ -383,8 +384,6 @@ deck-config-which-deck = Which deck would you like to display options for?
 deck-config-updating-cards = Updating cards: { $current_cards_count }/{ $total_cards_count }...
 deck-config-invalid-parameters = The provided FSRS parameters are invalid. Leave them blank to use the default parameters.
 deck-config-not-enough-history = Insufficient review history to perform this operation.
-deck-config-unable-to-determine-desired-retention =
-    Unable to determine a minimum recommended retention.
 deck-config-must-have-400-reviews =
     { $count ->
         [one] Only { $count } review was found.
@@ -393,10 +392,9 @@ deck-config-must-have-400-reviews =
 # Numbers that control how aggressively the FSRS algorithm schedules cards
 deck-config-weights = FSRS parameters
 deck-config-compute-optimal-weights = Optimize FSRS parameters
-deck-config-compute-minimum-recommended-retention = Minimum recommended retention
 deck-config-optimize-button = Optimize Current Preset
+# Indicates that a given function or label, provided via the "text" variable, operates slowly.
 deck-config-slow-suffix = { $text } (slow)
-deck-config-health-check = Check health when optimizing
 deck-config-compute-button = Compute
 deck-config-ignore-before = Ignore cards reviewed before
 deck-config-time-to-optimize = It's been a while - using the Optimize All Presets button is recommended.
@@ -406,7 +404,6 @@ deck-config-historical-retention = Historical retention
 deck-config-smaller-is-better = Smaller numbers indicate a better fit to your review history.
 deck-config-steps-too-large-for-fsrs = When FSRS is enabled, steps of 1 day or more are not recommended.
 deck-config-get-params = Get Params
-deck-config-predicted-minimum-recommended-retention = Minimum recommended retention: { $num }
 deck-config-complete = { $num }% complete.
 deck-config-iterations = Iteration: { $count }...
 deck-config-reschedule-cards-on-change = Reschedule cards on change
@@ -424,6 +421,8 @@ deck-config-desired-retention-tooltip =
     less frequently, and you will forget more of them. Be conservative when adjusting this - higher
     values will greatly increase your workload, and lower values can be demoralizing when you forget
     a lot of material.
+deck-config-desired-retention-tooltip2 =
+    The workload values provided by the info box are a rough approximation. For a greater level of accuracy, use the simulator.
 deck-config-historical-retention-tooltip =
     When some of your review history is missing, FSRS needs to fill in the gaps. By default, it will
     assume that when you did those old reviews, you remembered 90% of the material. If your old retention
@@ -465,12 +464,7 @@ deck-config-compute-optimal-weights-tooltip2 =
     By default, parameters will be calculated from the review history of all decks using the current preset. You can
     optionally adjust the search before calculating the parameters, if you'd like to alter which cards are used for
     optimizing the parameters.
-deck-config-compute-optimal-retention-tooltip4 =
-    This tool will attempt to find the desired retention value
-    that will lead to the most material learnt, in the least amount of time. The calculated number can serve as a reference
-    when deciding what to set your desired retention to. You may wish to choose a higher desired retention if you’re
-    willing to invest more study time to achieve it. Setting your desired retention lower than the minimum
-    is not recommended, as it will lead to a higher workload, because of the high forgetting rate.
 deck-config-please-save-your-changes-first = Please save your changes first.
 deck-config-workload-factor-change = Approximate workload: {$factor}x
     (compared to {$previousDR}% desired retention)
@@ -484,20 +478,11 @@ deck-config-percent-of-reviews =
        *[other] { $pct }% of { $reviews } reviews
     }
 deck-config-percent-input = { $pct }%
+# This message appears during FSRS parameter optimization.
 deck-config-checking-for-improvement = Checking for improvement...
 deck-config-optimizing-preset = Optimizing preset { $current_count }/{ $total_count }...
 deck-config-fsrs-must-be-enabled = FSRS must be enabled first.
 deck-config-fsrs-params-optimal = The FSRS parameters currently appear to be optimal.
-deck-config-fsrs-bad-fit-warning = Health Check:
-    Your memory is difficult for FSRS to predict. Recommendations:
-
-    - Suspend or reformulate leeches.
-    - Use the answer buttons consistently. Keep in mind that "Hard" is a passing grade, not a failing grade.
-    - Understand before you memorize.
-
-    If you follow these suggestions, performance will usually improve over the next few months.
-deck-config-fsrs-good-fit = Health Check:
-    FSRS can adapt to your memory well.
-
 deck-config-fsrs-params-no-reviews = No reviews found. Make sure this preset is assigned to all decks (including subdecks) that you want to optimize, and try again.
@@ -511,7 +496,10 @@ deck-config-desired-retention-below-optimal = Your desired retention is below op
 # Description of the y axis in the FSRS simulation
 # diagram (Deck options -> FSRS) showing the total number of
 # cards that can be recalled or retrieved on a specific date.
-deck-config-fsrs-simulator-experimental = FSRS simulator (experimental)
+deck-config-fsrs-simulator-experimental = FSRS Simulator (Experimental)
+deck-config-fsrs-simulate-desired-retention-experimental = FSRS Desired Retention Simulator (Experimental)
+deck-config-fsrs-simulate-save-preset = After optimizing, please save your deck preset before running the simulator.
+deck-config-fsrs-desired-retention-help-me-decide-experimental = Help Me Decide (Experimental)
 deck-config-additional-new-cards-to-simulate = Additional new cards to simulate
 deck-config-simulate = Simulate
 deck-config-clear-last-simulate = Clear Last Simulation
@@ -520,13 +508,45 @@ deck-config-advanced-settings = Advanced Settings
 deck-config-smooth-graph = Smooth graph
 deck-config-suspend-leeches = Suspend leeches
 deck-config-save-options-to-preset = Save Changes to Preset
+deck-config-save-options-to-preset-confirm = Overwrite the options in your current preset with the options that are currently set in the simulator?
 # Radio button in the FSRS simulation diagram (Deck options -> FSRS) selecting
 # to show the total number of cards that can be recalled or retrieved on a
 # specific date.
 deck-config-fsrs-simulator-radio-memorized = Memorized
+deck-config-fsrs-simulator-radio-ratio = Time / Memorized Ratio
+# $time here is pre-formatted e.g. "10 Seconds"
+deck-config-fsrs-simulator-ratio-tooltip = { $time } per memorized card
+
+## Messages related to the FSRS scheduler’s health check. The health check determines whether the correlation between FSRS predictions and your memory is good or bad. It can be optionally triggered as part of the "Optimize" function.
+
+# Checkbox
+deck-config-health-check = Check health when optimizing
+# Message box showing the result of the health check
+deck-config-fsrs-bad-fit-warning = Health Check:
+    Your memory is difficult for FSRS to predict. Recommendations:
+
+    - Suspend or reformulate any cards you constantly forget.
+    - Use the answer buttons consistently. Keep in mind that "Hard" is a passing grade, not a failing grade.
+    - Understand before you memorize.
+
+    If you follow these suggestions, performance will usually improve over the next few months.
+# Message box showing the result of the health check
+deck-config-fsrs-good-fit = Health Check:
+    FSRS can adapt to your memory well.
+
 ## NO NEED TO TRANSLATE. This text is no longer used by Anki, and will be removed in the future.
+
+deck-config-unable-to-determine-desired-retention =
+    Unable to determine a minimum recommended retention.
+deck-config-predicted-minimum-recommended-retention = Minimum recommended retention: { $num }
+deck-config-compute-minimum-recommended-retention = Minimum recommended retention
+deck-config-compute-optimal-retention-tooltip4 =
+    This tool will attempt to find the desired retention value
+    that will lead to the most material learnt, in the least amount of time. The calculated number can serve as a reference
+    when deciding what to set your desired retention to. You may wish to choose a higher desired retention if you’re
+    willing to invest more study time to achieve it. Setting your desired retention lower than the minimum
+    is not recommended, as it will lead to a higher workload, because of the high forgetting rate.
+deck-config-plotted-on-x-axis = (Plotted on the X-axis)
 deck-config-a-100-day-interval =
     { $days ->
         [one] A 100 day interval will become { $days } day.
@@ -48,6 +48,7 @@ importing-merge-notetypes-help =
     Warning: This will require a one-way sync, and may mark existing notes as modified.
 importing-mnemosyne-20-deck-db = Mnemosyne 2.0 Deck (*.db)
 importing-multicharacter-separators-are-not-supported-please = Multi-character separators are not supported. Please enter one character only.
+importing-new-deck-will-be-created = A new deck will be created: { $name }
 importing-notes-added-from-file = Notes added from file: { $val }
 importing-notes-found-in-file = Notes found in file: { $val }
 importing-notes-skipped-as-theyre-already-in = Notes skipped, as up-to-date copies are already in your collection: { $val }
@@ -65,7 +66,6 @@ importing-with-deck-configs-help =
     If enabled, any deck options that the deck sharer included will also be imported.
     Otherwise, all decks will be assigned the default preset.
 importing-packaged-anki-deckcollection-apkg-colpkg-zip = Packaged Anki Deck/Collection (*.apkg *.colpkg *.zip)
-importing-pauker-18-lesson-paugz = Pauker 1.8 Lesson (*.pau.gz)
 # the '|' character
 importing-pipe = Pipe
 # Warning displayed when the csv import preview table is clipped (some columns were hidden)
@@ -78,7 +78,6 @@ importing-rows-had-num1d-fields-expected-num2d = '{ $row }' had { $found } field
 importing-selected-file-was-not-in-utf8 = Selected file was not in UTF-8 format. Please see the importing section of the manual.
 importing-semicolon = Semicolon
 importing-skipped = Skipped
-importing-supermemo-xml-export-xml = Supermemo XML export (*.xml)
 importing-tab = Tab
 importing-tag-modified-notes = Tag modified notes:
 importing-text-separated-by-tabs-or-semicolons = Text separated by tabs or semicolons (*)
@@ -252,3 +251,5 @@ importing-importing-collection = Importing collection...
 importing-unable-to-import-filename = Unable to import { $filename }: file type not supported
 importing-notes-that-could-not-be-imported = Notes that could not be imported as note type has changed: { $val }
 importing-added = Added
+importing-pauker-18-lesson-paugz = Pauker 1.8 Lesson (*.pau.gz)
+importing-supermemo-xml-export-xml = Supermemo XML export (*.xml)
@@ -34,7 +34,7 @@ preferences-when-adding-default-to-current-deck = When adding, default to curren
 preferences-you-can-restore-backups-via-fileswitch = You can restore backups via File > Switch Profile.
 preferences-legacy-timezone-handling = Legacy timezone handling (buggy, but required for AnkiDroid <= 2.14)
 preferences-default-search-text = Default search text
-preferences-default-search-text-example = eg. 'deck:current '
+preferences-default-search-text-example = e.g. "deck:current"
 preferences-theme = Theme
 preferences-theme-follow-system = Follow System
 preferences-theme-light = Light
@@ -80,7 +80,7 @@ statistics-reviews =
 # This fragment of the tooltip in the FSRS simulation
 # diagram (Deck options -> FSRS) shows the total number of
 # cards that can be recalled or retrieved on a specific date.
-statistics-memorized = {$memorized} memorized
+statistics-memorized = {$memorized} cards memorized
 statistics-today-title = Today
 statistics-today-again-count = Again count:
 statistics-today-type-counts = Learn: { $learnCount }, Review: { $reviewCount }, Relearn: { $relearnCount }, Filtered: { $filteredCount }
@ -99,9 +99,9 @@ statistics-counts-relearning-cards = Relearning
|
||||||
statistics-counts-title = Card Counts
|
statistics-counts-title = Card Counts
|
||||||
statistics-counts-separate-suspended-buried-cards = Separate suspended/buried cards
|
statistics-counts-separate-suspended-buried-cards = Separate suspended/buried cards
|
||||||
|
|
||||||
## True Retention represents your actual retention rate from past reviews, in
|
## Retention represents your actual retention from past reviews, in
|
||||||
## comparison to the "desired retention" parameter of FSRS, which forecasts
|
## comparison to the "desired retention" setting of FSRS, which forecasts
|
||||||
## future retention. True Retention is the percentage of all reviewed cards
|
## future retention. Retention is the percentage of all reviewed cards
|
||||||
## that were marked as "Hard," "Good," or "Easy" within a specific time period.
|
## that were marked as "Hard," "Good," or "Easy" within a specific time period.
|
||||||
##
|
##
|
||||||
## Most of these strings are used as column / row headings in a table.
|
## Most of these strings are used as column / row headings in a table.
|
||||||
|
@ -112,9 +112,9 @@ statistics-counts-separate-suspended-buried-cards = Separate suspended/buried ca
|
||||||
## N.B. Stats cards may be very small on mobile devices and when the Stats
|
## N.B. Stats cards may be very small on mobile devices and when the Stats
|
||||||
## window is certain sizes.
|
## window is certain sizes.
|
||||||
|
|
||||||
statistics-true-retention-title = True Retention
|
statistics-true-retention-title = Retention
|
||||||
statistics-true-retention-subtitle = Pass rate of cards with an interval ≥ 1 day.
|
statistics-true-retention-subtitle = Pass rate of cards with an interval ≥ 1 day.
|
||||||
statistics-true-retention-tooltip = If you are using FSRS, your true retention is expected to be close to your desired retention. Please keep in mind that data for a single day is noisy, so it's better to look at monthly data.
|
statistics-true-retention-tooltip = If you are using FSRS, your retention is expected to be close to your desired retention. Please keep in mind that data for a single day is noisy, so it's better to look at monthly data.
|
||||||
statistics-true-retention-range = Range
|
statistics-true-retention-range = Range
|
||||||
statistics-true-retention-pass = Pass
|
statistics-true-retention-pass = Pass
|
||||||
statistics-true-retention-fail = Fail
|
statistics-true-retention-fail = Fail
|
||||||
|
|
|
@@ -46,6 +46,20 @@ studying-type-answer-unknown-field = Type answer: unknown field { $val }
 studying-unbury = Unbury
 studying-what-would-you-like-to-unbury = What would you like to unbury?
 studying-you-havent-recorded-your-voice-yet = You haven't recorded your voice yet.
+studying-card-studied-in-minute =
+    { $cards ->
+        [one] { $cards } card
+       *[other] { $cards } cards
+    } studied in
+    { $minutes ->
+        [one] { $minutes } minute.
+       *[other] { $minutes } minutes.
+    }
+studying-question-time-elapsed = Question time elapsed
+studying-answer-time-elapsed = Answer time elapsed
+
+## OBSOLETE; you do not need to translate this
+
 studying-card-studied-in =
     { $count ->
         [one] { $count } card studied in
@@ -56,5 +70,3 @@ studying-minute =
         [one] { $count } minute.
        *[other] { $count } minutes.
     }
-studying-question-time-elapsed = Question time elapsed
-studying-answer-time-elapsed = Answer time elapsed
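The new studying-card-studied-in-minute message runs two plural selectors inside one pattern. As an illustrative sketch only: Anki renders these strings through its own Rust Fluent backend, and the third-party `fluent.runtime` package below is just a convenient way to demonstrate how such a message resolves.

```python
# Demonstration of the two-selector Fluent pattern above, using the
# third-party `fluent.runtime` package (an assumption: it is not part of
# this diff or of Anki itself).
from fluent.runtime import FluentBundle, FluentResource

FTL = """
studying-card-studied-in-minute =
    { $cards ->
        [one] { $cards } card
       *[other] { $cards } cards
    } studied in
    { $minutes ->
        [one] { $minutes } minute.
       *[other] { $minutes } minutes.
    }
"""

bundle = FluentBundle(["en-US"], use_isolating=False)
bundle.add_resource(FluentResource(FTL))
message = bundle.get_message("studying-card-studied-in-minute")
text, errors = bundle.format_pattern(message.value, {"cards": 3, "minutes": 1})
print(text)  # -> "3 cards studied in 1 minute."
```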
@@ -1 +1 @@
-Subproject commit fbe9d1c731f7ad09953e63fdb0c455a6d3a3b6be
+Subproject commit fd5f984785ad07a0d3dbd893ee3d7e3671eaebd6
@@ -1,4 +1,5 @@
 qt-accel-about = &About
+qt-accel-about-mac = About Anki...
 qt-accel-cards = &Cards
 qt-accel-check-database = &Check Database
 qt-accel-check-media = Check &Media
@@ -45,3 +46,4 @@ qt-accel-zoom-editor-in = Zoom Editor &In
 qt-accel-zoom-editor-out = Zoom Editor &Out
 qt-accel-create-backup = Create &Backup
 qt-accel-load-backup = &Revert to Backup
+qt-accel-upgrade-downgrade = Upgrade/Downgrade
@@ -73,6 +73,7 @@ qt-misc-second =
 qt-misc-layout-auto-enabled = Responsive layout enabled
 qt-misc-layout-vertical-enabled = Vertical layout enabled
 qt-misc-layout-horizontal-enabled = Horizontal layout enabled
+qt-misc-open-anki-launcher = Change to a different Anki version?
 
 ## deprecated- these strings will be removed in the future, and do not need
 ## to be translated
@@ -435,7 +435,7 @@ impl TextWriter {
             item = item.trim_start_matches(' ');
         }
 
-        write!(self.buffer, "{}", item)
+        write!(self.buffer, "{item}")
     }
 
     fn write_char_into_indent(&mut self, ch: char) {
@@ -67,7 +67,7 @@ fn additional_template_folder(dst_folder: &Utf8Path) -> Option<Utf8PathBuf> {
 
 fn all_langs(lang_folder: &Utf8Path) -> Result<Vec<Utf8PathBuf>> {
     std::fs::read_dir(lang_folder)
-        .with_context(|| format!("reading {:?}", lang_folder))?
+        .with_context(|| format!("reading {lang_folder:?}"))?
         .filter_map(Result::ok)
         .map(|e| Ok(e.path().utf8()?))
         .collect()
package.json (22 changed lines)

@@ -19,8 +19,8 @@
     "@poppanator/sveltekit-svg": "^5.0.0",
     "@sqltools/formatter": "^1.2.2",
     "@sveltejs/adapter-static": "^3.0.0",
-    "@sveltejs/kit": "^2.20.7",
-    "@sveltejs/vite-plugin-svelte": "4.0.0",
+    "@sveltejs/kit": "^2.22.2",
+    "@sveltejs/vite-plugin-svelte": "5.1",
     "@types/bootstrap": "^5.0.12",
     "@types/codemirror": "^5.60.0",
     "@types/d3": "^7.0.0",
@@ -30,7 +30,7 @@
     "@types/jqueryui": "^1.12.13",
     "@types/lodash-es": "^4.17.4",
     "@types/marked": "^5.0.0",
-    "@types/node": "^20",
+    "@types/node": "^22",
     "@typescript-eslint/eslint-plugin": "^5.60.1",
     "@typescript-eslint/parser": "^5.60.1",
     "caniuse-lite": "^1.0.30001431",
@@ -48,16 +48,16 @@
     "prettier": "^3.4.2",
     "prettier-plugin-svelte": "^3.3.2",
     "sass": "<1.77",
-    "svelte": "^5.17.3",
-    "svelte-check": "^3.4.4",
-    "svelte-preprocess": "^5.0.4",
+    "svelte": "^5.34.9",
+    "svelte-check": "^4.2.2",
+    "svelte-preprocess": "^6.0.3",
     "svelte-preprocess-esbuild": "^3.0.1",
     "svgo": "^3.2.0",
     "tslib": "^2.0.3",
-    "tsx": "^3.12.0",
+    "tsx": "^4.8.1",
     "typescript": "^5.0.4",
-    "vite": "5.4.19",
-    "vitest": "^2"
+    "vite": "6",
+    "vitest": "^3"
   },
   "dependencies": {
     "@bufbuild/protobuf": "^1.2.1",
@@ -81,7 +81,9 @@
   },
   "resolutions": {
     "canvas": "npm:empty-npm-package@1.0.0",
-    "cookie": "0.7.0"
+    "cookie": "0.7.0",
+    "devalue": "^5.3.2",
+    "vite": "6"
   },
   "browserslist": [
     "defaults",
@@ -51,6 +51,7 @@ message Card {
   optional FsrsMemoryState memory_state = 20;
   optional float desired_retention = 21;
   optional float decay = 22;
+  optional int64 last_review_time_secs = 23;
   string custom_data = 19;
 }
 
@@ -20,6 +20,7 @@ service CollectionService {
   rpc LatestProgress(generic.Empty) returns (Progress);
   rpc SetWantsAbort(generic.Empty) returns (generic.Empty);
   rpc SetLoadBalancerEnabled(generic.Bool) returns (OpChanges);
+  rpc GetCustomColours(generic.Empty) returns (GetCustomColoursResponse);
 }
 
 // Implicitly includes any of the above methods that are not listed in the
@@ -163,3 +164,7 @@ message CreateBackupRequest {
   bool force = 2;
   bool wait_for_completion = 3;
 }
+
+message GetCustomColoursResponse {
+  repeated string colours = 1;
+}
@@ -56,6 +56,7 @@ message ConfigKey {
     RENDER_LATEX = 25;
     LOAD_BALANCER_ENABLED = 26;
     FSRS_SHORT_TERM_WITH_STEPS_ENABLED = 27;
+    FSRS_LEGACY_EVALUATE = 28;
   }
   enum String {
     SET_DUE_BROWSER = 0;
@@ -40,12 +40,10 @@ message DeckConfigId {
 message GetRetentionWorkloadRequest {
   repeated float w = 1;
   string search = 2;
-  float before = 3;
-  float after = 4;
 }
 
 message GetRetentionWorkloadResponse {
-  float factor = 1;
+  map<uint32, float> costs = 1;
 }
 
 message GetIgnoredBeforeCountRequest {
@@ -219,6 +217,8 @@ message DeckConfigsForUpdate {
     bool review_today_active = 5;
     // Whether new_today applies to today or a past day.
     bool new_today_active = 6;
+    // Deck-specific desired retention override
+    optional float desired_retention = 7;
   }
   string name = 1;
   int64 config_id = 2;
@@ -236,6 +236,7 @@ message DeckConfigsForUpdate {
   bool new_cards_ignore_review_limit = 7;
   bool fsrs = 8;
   bool fsrs_health_check = 11;
+  bool fsrs_legacy_evaluate = 12;
   bool apply_all_parent_limits = 9;
   uint32 days_since_last_fsrs_optimize = 10;
 }
@@ -83,6 +83,8 @@ message Deck {
     optional uint32 new_limit = 7;
     DayLimit review_limit_today = 8;
     DayLimit new_limit_today = 9;
+    // Deck-specific desired retention override
+    optional float desired_retention = 10;
 
     reserved 12 to 15;
   }
@@ -27,6 +27,9 @@ service FrontendService {
   rpc deckOptionsRequireClose(generic.Empty) returns (generic.Empty);
   // Warns python that the deck option web view is ready to receive requests.
   rpc deckOptionsReady(generic.Empty) returns (generic.Empty);
+
+  // Save colour picker's custom colour palette
+  rpc SaveCustomColours(generic.Empty) returns (generic.Empty);
 }
 
 service BackendFrontendService {}
@@ -176,9 +176,12 @@ message CsvMetadata {
   // to determine the number of columns.
   repeated string column_labels = 5;
   oneof deck {
+    // id of an existing deck
     int64 deck_id = 6;
     // One-based. 0 means n/a.
     uint32 deck_column = 7;
+    // name of new deck to be created
+    string deck_name = 17;
   }
   oneof notetype {
     // One notetype for all rows with given column mapping.
@@ -59,7 +59,7 @@ message AddNoteRequest {
 }
 
 message AddNoteResponse {
-  collection.OpChanges changes = 1;
+  collection.OpChangesWithCount changes = 1;
   int64 note_id = 2;
 }
 
@@ -55,7 +55,11 @@ service SchedulerService {
       returns (ComputeOptimalRetentionResponse);
   rpc SimulateFsrsReview(SimulateFsrsReviewRequest)
       returns (SimulateFsrsReviewResponse);
+  rpc SimulateFsrsWorkload(SimulateFsrsReviewRequest)
+      returns (SimulateFsrsWorkloadResponse);
   rpc EvaluateParams(EvaluateParamsRequest) returns (EvaluateParamsResponse);
+  rpc EvaluateParamsLegacy(EvaluateParamsLegacyRequest)
+      returns (EvaluateParamsResponse);
   rpc ComputeMemoryState(cards.CardId) returns (ComputeMemoryStateResponse);
   // The number of days the calculated interval was fuzzed by on the previous
   // review (if any). Utilized by the FSRS add-on.
@@ -402,6 +406,9 @@ message SimulateFsrsReviewRequest {
   repeated float easy_days_percentages = 10;
   deck_config.DeckConfig.Config.ReviewCardOrder review_order = 11;
   optional uint32 suspend_after_lapse_count = 12;
+  float historical_retention = 13;
+  uint32 learning_step_count = 14;
+  uint32 relearning_step_count = 15;
 }
 
 message SimulateFsrsReviewResponse {
@@ -411,6 +418,12 @@ message SimulateFsrsReviewResponse {
   repeated float daily_time_cost = 4;
 }
 
+message SimulateFsrsWorkloadResponse {
+  map<uint32, float> cost = 1;
+  map<uint32, float> memorized = 2;
+  map<uint32, uint32> review_count = 3;
+}
+
 message ComputeOptimalRetentionResponse {
   float optimal_retention = 1;
 }
@@ -442,6 +455,12 @@ message EvaluateParamsRequest {
   uint32 num_of_relearning_steps = 3;
 }
 
+message EvaluateParamsLegacyRequest {
+  repeated float params = 1;
+  string search = 2;
+  int64 ignore_revlogs_before_ms = 3;
+}
+
 message EvaluateParamsResponse {
   float log_loss = 1;
   float rmse_bins = 2;
@@ -450,6 +469,7 @@ message EvaluateParamsResponse {
 message ComputeMemoryStateResponse {
   optional cards.FsrsMemoryState state = 1;
   float desired_retention = 2;
+  float decay = 3;
 }
 
 message FuzzDeltaRequest {
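The new SimulateFsrsWorkload RPC returns three parallel maps that share the same uint32 keys. A hedged consumer sketch follows; the reading of the keys as candidate desired-retention percentages is my assumption, not something the diff states.

```python
# Hypothetical consumer of a SimulateFsrsWorkloadResponse `resp`.
# Assumption: cost, memorized and review_count are keyed by the same
# uint32 values (e.g. candidate desired-retention percentages).
def summarize_workload(resp) -> None:
    for key in sorted(resp.cost):
        cost = resp.cost[key]
        memorized = resp.memorized.get(key, 0.0)
        reviews = resp.review_count.get(key, 0)
        # Guard against division by zero before computing efficiency.
        efficiency = memorized / cost if cost else 0.0
        print(f"{key}: cost={cost:.1f} memorized={memorized:.0f} "
              f"reviews={reviews} memorized/cost={efficiency:.2f}")
```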
@@ -74,10 +74,15 @@ message SearchNode {
     repeated SearchNode nodes = 1;
     Joiner joiner = 2;
   }
+  enum FieldSearchMode {
+    FIELD_SEARCH_MODE_NORMAL = 0;
+    FIELD_SEARCH_MODE_REGEX = 1;
+    FIELD_SEARCH_MODE_NOCOMBINING = 2;
+  }
   message Field {
     string field_name = 1;
     string text = 2;
-    bool is_re = 3;
+    FieldSearchMode mode = 3;
   }
 
   oneof filter {
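With `bool is_re` replaced by the `FieldSearchMode` enum, a field search can now also request no-combining matching alongside normal and regex modes. A sketch of building such a node from pylib; `SearchNode` is re-exported by `anki.collection`, though the exact generated enum access path is an assumption.

```python
# Sketch: a regex search restricted to one field, using the new enum.
# `SearchNode` comes from anki.collection; the nested enum path follows
# the proto above but is an assumption about the generated code.
from anki.collection import SearchNode

node = SearchNode(
    field=SearchNode.Field(
        field_name="Front",
        text=r"\d{4}",
        mode=SearchNode.FieldSearchMode.FIELD_SEARCH_MODE_REGEX,
    )
)
# With an open collection, col.build_search_string(node) yields the
# equivalent search text.
```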
@@ -46,7 +46,6 @@ from .errors import (
 
 # the following comment is required to suppress a warning that only shows up
 # when there are other pylint failures
-# pylint: disable=c-extension-no-member
 if _rsbridge.buildhash() != anki.buildinfo.buildhash:
     raise Exception(
         f"""rsbridge and anki build hashes do not match:
@@ -164,7 +163,7 @@ class RustBackend(RustBackendGenerated):
         finally:
             elapsed = time.time() - start
             if current_thread() is main_thread() and elapsed > 0.2:
-                print(f"blocked main thread for {int(elapsed*1000)}ms:")
+                print(f"blocked main thread for {int(elapsed * 1000)}ms:")
                 print("".join(traceback.format_stack()))
 
         err = backend_pb2.BackendError()
@@ -247,7 +246,7 @@ def backend_exception_to_pylib(err: backend_pb2.BackendError) -> Exception:
         return BackendError(err.message, help_page, context, backtrace)
 
     elif val == kind.SEARCH_ERROR:
-        return SearchError(markdown(err.message), help_page, context, backtrace)
+        return SearchError(err.message, help_page, context, backtrace)
 
     elif val == kind.UNDO_EMPTY:
         return UndoEmpty(err.message, help_page, context, backtrace)
@@ -7,7 +7,7 @@ import pprint
 import time
 from typing import NewType
 
-import anki  # pylint: disable=unused-import
+import anki
 import anki.collection
 import anki.decks
 import anki.notes
@@ -49,6 +49,7 @@ class Card(DeprecatedNamesMixin):
     memory_state: FSRSMemoryState | None
     desired_retention: float | None
     decay: float | None
+    last_review_time: int | None
 
     def __init__(
         self,
@@ -103,6 +104,11 @@ class Card(DeprecatedNamesMixin):
             card.desired_retention if card.HasField("desired_retention") else None
         )
        self.decay = card.decay if card.HasField("decay") else None
+        self.last_review_time = (
+            card.last_review_time_secs
+            if card.HasField("last_review_time_secs")
+            else None
+        )
 
     def _to_backend_card(self) -> cards_pb2.Card:
         # mtime & usn are set by backend
@@ -127,6 +133,7 @@ class Card(DeprecatedNamesMixin):
             memory_state=self.memory_state,
             desired_retention=self.desired_retention,
             decay=self.decay,
+            last_review_time_secs=self.last_review_time,
         )
 
     @deprecated(info="please use col.update_card()")
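The pylib side mirrors the proto change: `Card.last_review_time` is an optional epoch-seconds value. A minimal sketch of reading it, assuming an open `Collection` named `col` and a valid `card_id` (both assumptions, not part of the diff):

```python
# Sketch: inspecting the new optional field. `col` and `card_id` are
# assumed to exist; the value is epoch seconds, or None if never reviewed.
from datetime import datetime, timezone

card = col.get_card(card_id)
if card.last_review_time is not None:
    when = datetime.fromtimestamp(card.last_review_time, tz=timezone.utc)
    print(f"last reviewed {when.isoformat()}")
else:
    print("card has no recorded review")
```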
@@ -122,6 +122,7 @@ class ComputedMemoryState:
     desired_retention: float
     stability: float | None = None
     difficulty: float | None = None
+    decay: float | None = None
 
 
 @dataclass
@@ -157,7 +158,7 @@ class Collection(DeprecatedNamesMixin):
         self.tags = TagManager(self)
         self.conf = ConfigManager(self)
         self._load_scheduler()
-        self._startReps = 0  # pylint: disable=invalid-name
+        self._startReps = 0
 
     def name(self) -> Any:
         return os.path.splitext(os.path.basename(self.path))[0]
@@ -510,9 +511,7 @@ class Collection(DeprecatedNamesMixin):
     # Utils
     ##########################################################################
 
-    def nextID(  # pylint: disable=invalid-name
-        self, type: str, inc: bool = True
-    ) -> Any:
+    def nextID(self, type: str, inc: bool = True) -> Any:
         type = f"next{type.capitalize()}"
         id = self.conf.get(type, 1)
         if inc:
@@ -529,7 +528,7 @@ class Collection(DeprecatedNamesMixin):
     def new_note(self, notetype: NotetypeDict) -> Note:
         return Note(self, notetype)
 
-    def add_note(self, note: Note, deck_id: DeckId) -> OpChanges:
+    def add_note(self, note: Note, deck_id: DeckId) -> OpChangesWithCount:
         hooks.note_will_be_added(self, note, deck_id)
         out = self._backend.add_note(note=note._to_backend_note(), deck_id=deck_id)
         note.id = NoteId(out.note_id)
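Because `add_note()` now returns `OpChangesWithCount` rather than `OpChanges`, call sites gain access to a count. A hedged sketch of the call-site impact, assuming an open `Collection` `col`, a prepared `Note` `note`, and a `deck_id`:

```python
# Sketch: consuming the new return type. `col`, `note` and `deck_id` are
# assumed; OpChangesWithCount wraps the OpChanges plus a count field.
out = col.add_note(note, deck_id)
print(out.count)       # how many notes the operation reported adding
changes = out.changes  # the nested OpChanges, as callers used before
```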
@@ -848,7 +847,6 @@ class Collection(DeprecatedNamesMixin):
         )
 
     def _pb_search_separator(self, operator: SearchJoiner) -> SearchNode.Group.Joiner.V:
-        # pylint: disable=no-member
         if operator == "AND":
             return SearchNode.Group.Joiner.AND
         else:
@@ -866,7 +864,9 @@ class Collection(DeprecatedNamesMixin):
                 return column
         return None
 
-    def browser_row_for_id(self, id_: int) -> tuple[
+    def browser_row_for_id(
+        self, id_: int
+    ) -> tuple[
         Generator[tuple[str, bool, BrowserRow.Cell.TextElideMode.V], None, None],
         BrowserRow.Color.V,
         str,
@@ -1189,9 +1189,13 @@ class Collection(DeprecatedNamesMixin):
                 desired_retention=resp.desired_retention,
                 stability=resp.state.stability,
                 difficulty=resp.state.difficulty,
+                decay=resp.decay,
             )
         else:
-            return ComputedMemoryState(desired_retention=resp.desired_retention)
+            return ComputedMemoryState(
+                desired_retention=resp.desired_retention,
+                decay=resp.decay,
+            )
 
     def fuzz_delta(self, card_id: CardId, interval: int) -> int:
         "The delta days of fuzz applied if reviewing the card in v3."
@@ -1207,8 +1211,6 @@ class Collection(DeprecatedNamesMixin):
     # the count on things like edits, which we probably could do by checking
     # the previous state in moveToState.
 
-    # pylint: disable=invalid-name
-
     def startTimebox(self) -> None:
         self._startTime = time.time()
         self._startReps = self.sched.reps
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 from __future__ import annotations
 
@@ -351,7 +350,7 @@ class AnkiPackageExporter(AnkiExporter):
         colfile = path.replace(".apkg", ".anki2")
         AnkiExporter.exportInto(self, colfile)
         # prevent older clients from accessing
-        # pylint: disable=unreachable
+
         self._addDummyCollection(z)
         z.write(colfile, "collection.anki21")
 
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 from __future__ import annotations
 
@@ -1,8 +1,7 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-"""Helpers for serializing third-party collections to a common JSON form.
-"""
+"""Helpers for serializing third-party collections to a common JSON form."""
 
 from __future__ import annotations
 
@@ -175,8 +175,8 @@ class MnemoFact:
     def fact_view(self) -> type[MnemoFactView]:
         try:
             fact_view = self.cards[0].fact_view_id
-        except IndexError as err:
-            raise Exception(f"Fact {id} has no cards") from err
+        except IndexError:
+            return FrontOnly
 
         if fact_view.startswith("1.") or fact_view.startswith("1::"):
             return FrontOnly
@@ -187,7 +187,7 @@ class MnemoFact:
         elif fact_view.startswith("5.1"):
             return Cloze
 
-        raise Exception(f"Fact {id} has unknown fact view: {fact_view}")
+        raise Exception(f"Fact {self.id} has unknown fact view: {fact_view}")
 
     def anki_fields(self, fact_view: type[MnemoFactView]) -> list[str]:
         return [munge_field(self.fields.get(k, "")) for k in fact_view.field_keys]
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 """
 Tools for extending Anki.
@@ -11,8 +11,6 @@ from anki.importing.apkg import AnkiPackageImporter
 from anki.importing.base import Importer
 from anki.importing.csvfile import TextImporter
 from anki.importing.mnemo import MnemosyneImporter
-from anki.importing.pauker import PaukerImporter
-from anki.importing.supermemo_xml import SupermemoXmlImporter  # type: ignore
 from anki.lang import TR
 
 
@@ -24,8 +22,6 @@ def importers(col: Collection) -> Sequence[tuple[str, type[Importer]]]:
             AnkiPackageImporter,
         ),
         (col.tr.importing_mnemosyne_20_deck_db(), MnemosyneImporter),
-        (col.tr.importing_supermemo_xml_export_xml(), SupermemoXmlImporter),
-        (col.tr.importing_pauker_18_lesson_paugz(), PaukerImporter),
     ]
     anki.hooks.importing_importers(importers)
     return importers
@@ -1,7 +1,7 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
+
 from __future__ import annotations
 
 import os
@@ -1,7 +1,7 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
+
 from __future__ import annotations
 
 import json
@@ -1,7 +1,7 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
+
 from __future__ import annotations
 
 from typing import Any
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 from __future__ import annotations
 
|
@ -144,7 +143,6 @@ class TextImporter(NoteImporter):
|
||||||
self.close()
|
self.close()
|
||||||
zuper = super()
|
zuper = super()
|
||||||
if hasattr(zuper, "__del__"):
|
if hasattr(zuper, "__del__"):
|
||||||
# pylint: disable=no-member
|
|
||||||
zuper.__del__(self) # type: ignore
|
zuper.__del__(self) # type: ignore
|
||||||
|
|
||||||
def noteFromFields(self, fields: list[str]) -> ForeignNote:
|
def noteFromFields(self, fields: list[str]) -> ForeignNote:
|
||||||
|
|
|
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 import re
 import time
|
@ -35,7 +34,6 @@ f._id=d._fact_id"""
|
||||||
):
|
):
|
||||||
if id != curid:
|
if id != curid:
|
||||||
if note:
|
if note:
|
||||||
# pylint: disable=unsubscriptable-object
|
|
||||||
notes[note["_id"]] = note
|
notes[note["_id"]] = note
|
||||||
note = {"_id": _id}
|
note = {"_id": _id}
|
||||||
curid = id
|
curid = id
|
||||||
|
@ -185,7 +183,6 @@ acq_reps+ret_reps, lapses, card_type_id from cards"""
|
||||||
state = dict(n=1)
|
state = dict(n=1)
|
||||||
|
|
||||||
def repl(match):
|
def repl(match):
|
||||||
# pylint: disable=cell-var-from-loop
|
|
||||||
# replace [...] with cloze refs
|
# replace [...] with cloze refs
|
||||||
res = "{{c%d::%s}}" % (state["n"], match.group(1))
|
res = "{{c%d::%s}}" % (state["n"], match.group(1))
|
||||||
state["n"] += 1
|
state["n"] += 1
|
||||||
|
|
|
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 from __future__ import annotations
 
@@ -167,9 +166,9 @@ class NoteImporter(Importer):
                 firsts[fld0] = True
             # already exists?
             found = False
-            if csum in csums:
+            if csum in csums:  # type: ignore[comparison-overlap]
                 # csum is not a guarantee; have to check
-                for id in csums[csum]:
+                for id in csums[csum]:  # type: ignore[index]
                     flds = self.col.db.scalar("select flds from notes where id = ?", id)
                     sflds = split_fields(flds)
                     if fld0 == sflds[0]:
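The two `# type: ignore` comments above only silence mypy on the checksum cache; the logic is unchanged: the first-field checksum narrows the candidate set, and an exact comparison then confirms the duplicate. A standalone sketch of that two-step check, using pylib's `field_checksum` helper; the shape of the `csums` cache is an assumption inferred from the surrounding code.

```python
# Sketch of the dedup strategy above: checksum as pre-filter, exact
# first-field comparison as confirmation. `col` is an open Collection;
# `csums` maps checksum -> list of note ids (shape assumed from context).
from anki.utils import field_checksum

def is_duplicate(col, fld0: str, csums: dict[int, list[int]]) -> bool:
    for nid in csums.get(field_checksum(fld0), []):
        flds = col.db.scalar("select flds from notes where id = ?", nid)
        if flds.split("\x1f")[0] == fld0:  # fields are 0x1f-separated
            return True
    return False
```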
@@ -1,94 +0,0 @@
-# Copyright: Andreas Klauer <Andreas.Klauer@metamorpher.de>
-# License: BSD-3
-
-# pylint: disable=invalid-name
-
-import gzip
-import html
-import math
-import random
-import time
-import xml.etree.ElementTree as ET
-
-from anki.importing.noteimp import ForeignCard, ForeignNote, NoteImporter
-from anki.stdmodels import _legacy_add_forward_reverse
-
-ONE_DAY = 60 * 60 * 24
-
-
-class PaukerImporter(NoteImporter):
-    """Import Pauker 1.8 Lesson (*.pau.gz)"""
-
-    needMapper = False
-    allowHTML = True
-
-    def run(self):
-        model = _legacy_add_forward_reverse(self.col)
-        model["name"] = "Pauker"
-        self.col.models.save(model, updateReqs=False)
-        self.col.models.set_current(model)
-        self.model = model
-        self.initMapping()
-        NoteImporter.run(self)
-
-    def fields(self):
-        """Pauker is Front/Back"""
-        return 2
-
-    def foreignNotes(self):
-        """Build and return a list of notes."""
-        notes = []
-
-        try:
-            f = gzip.open(self.file)
-            tree = ET.parse(f)  # type: ignore
-            lesson = tree.getroot()
-            assert lesson.tag == "Lesson"
-        finally:
-            f.close()
-
-        index = -4
-
-        for batch in lesson.findall("./Batch"):
-            index += 1
-
-            for card in batch.findall("./Card"):
-                # Create a note for this card.
-                front = card.findtext("./FrontSide/Text")
-                back = card.findtext("./ReverseSide/Text")
-                note = ForeignNote()
-                assert front and back
-                note.fields = [
-                    html.escape(x.strip())
-                    .replace("\n", "<br>")
-                    .replace("  ", " &nbsp;")
-                    for x in [front, back]
-                ]
-                notes.append(note)
-
-                # Determine due date for cards.
-                frontdue = card.find("./FrontSide[@LearnedTimestamp]")
-                backdue = card.find("./ReverseSide[@Batch][@LearnedTimestamp]")
-
-                if frontdue is not None:
-                    note.cards[0] = self._learnedCard(
-                        index, int(frontdue.attrib["LearnedTimestamp"])
-                    )
-
-                if backdue is not None:
-                    note.cards[1] = self._learnedCard(
-                        int(backdue.attrib["Batch"]),
-                        int(backdue.attrib["LearnedTimestamp"]),
-                    )
-
-        return notes
-
-    def _learnedCard(self, batch, timestamp):
-        ivl = math.exp(batch)
-        now = time.time()
-        due = ivl - (now - timestamp / 1000.0) / ONE_DAY
-        fc = ForeignCard()
-        fc.due = self.col.sched.today + int(due + 0.5)
-        fc.ivl = random.randint(int(ivl * 0.90), int(ivl + 0.5))
-        fc.factor = random.randint(1500, 2500)
-        return fc
@ -1,484 +0,0 @@
|
||||||
# Copyright: petr.michalec@gmail.com
|
|
||||||
# License: GNU GPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
|
||||||
# pytype: disable=attribute-error
|
|
||||||
# type: ignore
|
|
||||||
# pylint: disable=C
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import unicodedata
|
|
||||||
from string import capwords
|
|
||||||
from xml.dom import minidom
|
|
||||||
from xml.dom.minidom import Element, Text
|
|
||||||
|
|
||||||
from anki.collection import Collection
|
|
||||||
from anki.importing.noteimp import ForeignCard, ForeignNote, NoteImporter
|
|
||||||
from anki.stdmodels import _legacy_add_basic_model
|
|
||||||
|
|
||||||
|
|
||||||
class SmartDict(dict):
|
|
||||||
"""
|
|
||||||
See http://www.peterbe.com/plog/SmartDict
|
|
||||||
Copyright 2005, Peter Bengtsson, peter@fry-it.com
|
|
||||||
0BSD
|
|
||||||
|
|
||||||
A smart dict can be instantiated either from a pythonic dict
|
|
||||||
or an instance object (eg. SQL recordsets) but it ensures that you can
|
|
||||||
do all the convenient lookups such as x.first_name, x['first_name'] or
|
|
||||||
x.get('first_name').
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, *a, **kw) -> None:
|
|
||||||
if a:
|
|
||||||
if isinstance(type(a[0]), dict):
|
|
||||||
kw.update(a[0])
|
|
||||||
elif isinstance(type(a[0]), object):
|
|
||||||
kw.update(a[0].__dict__)
|
|
||||||
elif hasattr(a[0], "__class__") and a[0].__class__.__name__ == "SmartDict":
|
|
||||||
kw.update(a[0].__dict__)
|
|
||||||
|
|
||||||
dict.__init__(self, **kw)
|
|
||||||
self.__dict__ = self
|
|
||||||
|
|
||||||
|
|
||||||
class SuperMemoElement(SmartDict):
|
|
||||||
"SmartDict wrapper to store SM Element data"
|
|
||||||
|
|
||||||
def __init__(self, *a, **kw) -> None:
|
|
||||||
SmartDict.__init__(self, *a, **kw)
|
|
||||||
# default content
|
|
||||||
self.__dict__["lTitle"] = None
|
|
||||||
self.__dict__["Title"] = None
|
|
||||||
self.__dict__["Question"] = None
|
|
||||||
self.__dict__["Answer"] = None
|
|
||||||
self.__dict__["Count"] = None
|
|
||||||
self.__dict__["Type"] = None
|
|
||||||
self.__dict__["ID"] = None
|
|
||||||
self.__dict__["Interval"] = None
|
|
||||||
self.__dict__["Lapses"] = None
|
|
||||||
self.__dict__["Repetitions"] = None
|
|
||||||
self.__dict__["LastRepetiton"] = None
|
|
||||||
self.__dict__["AFactor"] = None
|
|
||||||
self.__dict__["UFactor"] = None
|
|
||||||
|
|
||||||
|
|
||||||
# This is an AnkiImporter
|
|
||||||
class SupermemoXmlImporter(NoteImporter):
|
|
||||||
needMapper = False
|
|
||||||
allowHTML = True
|
|
||||||
|
|
||||||
"""
|
|
||||||
Supermemo XML export's to Anki parser.
|
|
||||||
Goes through a SM collection and fetch all elements.
|
|
||||||
|
|
||||||
My SM collection was a big mess where topics and items were mixed.
|
|
||||||
I was unable to parse my content in a regular way like for loop on
|
|
||||||
minidom.getElementsByTagName() etc. My collection had also an
|
|
||||||
limitation, topics were splited into branches with max 100 items
|
|
||||||
on each. Learning themes were in deep structure. I wanted to have
|
|
||||||
full title on each element to be stored in tags.
|
|
||||||
|
|
||||||
Code should be upgrade to support importing of SM2006 exports.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, col: Collection, file: str) -> None:
|
|
||||||
"""Initialize internal variables.
|
|
||||||
Pameters to be exposed to GUI are stored in self.META"""
|
|
||||||
NoteImporter.__init__(self, col, file)
|
|
||||||
m = _legacy_add_basic_model(self.col)
|
|
||||||
m["name"] = "Supermemo"
|
|
||||||
self.col.models.save(m)
|
|
||||||
self.initMapping()
|
|
||||||
|
|
||||||
self.lines = None
|
|
||||||
self.numFields = int(2)
|
|
||||||
|
|
||||||
# SmXmlParse VARIABLES
|
|
||||||
self.xmldoc = None
|
|
||||||
self.pieces = []
|
|
||||||
self.cntBuf = [] # to store last parsed data
|
|
||||||
self.cntElm = [] # to store SM Elements data
|
|
||||||
self.cntCol = [] # to store SM Colections data
|
|
||||||
|
|
||||||
# store some meta info related to parse algorithm
|
|
||||||
# SmartDict works like dict / class wrapper
|
|
||||||
self.cntMeta = SmartDict()
|
|
||||||
self.cntMeta.popTitles = False
|
|
||||||
self.cntMeta.title = []
|
|
||||||
|
|
||||||
# META stores controls of import script, should be
|
|
||||||
# exposed to import dialog. These are default values.
|
|
||||||
self.META = SmartDict()
|
|
||||||
self.META.resetLearningData = False # implemented
|
|
||||||
self.META.onlyMemorizedItems = False # implemented
|
|
||||||
self.META.loggerLevel = 2 # implemented 0no,1info,2error,3debug
|
|
||||||
self.META.tagAllTopics = True
|
|
||||||
self.META.pathsToBeTagged = [
|
|
||||||
"English for beginners",
|
|
||||||
"Advanced English 97",
|
|
||||||
"Phrasal Verbs",
|
|
||||||
] # path patterns to be tagged - in gui entered like 'Advanced English 97|My Vocablary'
|
|
||||||
self.META.tagMemorizedItems = True # implemented
|
|
||||||
self.META.logToStdOutput = False # implemented
|
|
||||||
|
|
||||||
self.notes = []
|
|
||||||
|
|
||||||
## TOOLS
|
|
||||||
|
|
||||||
def _fudgeText(self, text: str) -> str:
|
|
||||||
"Replace sm syntax to Anki syntax"
|
|
||||||
text = text.replace("\n\r", "<br>")
|
|
||||||
text = text.replace("\n", "<br>")
|
|
||||||
return text
|
|
||||||
|
|
||||||
def _unicode2ascii(self, str: str) -> str:
|
|
||||||
"Remove diacritic punctuation from strings (titles)"
|
|
||||||
return "".join(
|
|
||||||
[
|
|
||||||
c
|
|
||||||
for c in unicodedata.normalize("NFKD", str)
|
|
||||||
if not unicodedata.combining(c)
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
def _decode_htmlescapes(self, html: str) -> str:
|
|
||||||
"""Unescape HTML code."""
|
|
||||||
# In case of bad formatted html you can import MinimalSoup etc.. see BeautifulSoup source code
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
|
|
||||||
# my sm2004 also ecaped & char in escaped sequences.
|
|
||||||
html = re.sub("&", "&", html)
|
|
||||||
|
|
||||||
# https://anki.tenderapp.com/discussions/ankidesktop/39543-anki-is-replacing-the-character-by-when-i-exit-the-html-edit-mode-ctrlshiftx
|
|
||||||
if html.find(">") < 0:
|
|
||||||
return html
|
|
||||||
|
|
||||||
# unescaped solitary chars < or > that were ok for minidom confuse btfl soup
|
|
||||||
# html = re.sub(u'>',u'>',html)
|
|
||||||
# html = re.sub(u'<',u'<',html)
|
|
||||||
|
|
||||||
return str(BeautifulSoup(html, "html.parser"))
|
|
||||||
|
|
||||||
def _afactor2efactor(self, af: float) -> float:
|
|
||||||
# Adapted from <http://www.supermemo.com/beta/xml/xml-core.htm>
|
|
||||||
|
|
||||||
# Ranges for A-factors and E-factors
|
|
||||||
af_min = 1.2
|
|
||||||
af_max = 6.9
|
|
||||||
ef_min = 1.3
|
|
||||||
ef_max = 3.3
|
|
||||||
|
|
||||||
# Sanity checks for the A-factor
|
|
||||||
if af < af_min:
|
|
||||||
af = af_min
|
|
||||||
elif af > af_max:
|
|
||||||
af = af_max
|
|
||||||
|
|
||||||
# Scale af to the range 0..1
|
|
||||||
af_scaled = (af - af_min) / (af_max - af_min)
|
|
||||||
# Rescale to the interval ef_min..ef_max
|
|
||||||
ef = ef_min + af_scaled * (ef_max - ef_min)
|
|
||||||
|
|
||||||
return ef
|
|
||||||
|
|
||||||
## DEFAULT IMPORTER METHODS
|
|
||||||
|
|
||||||
def foreignNotes(self) -> list[ForeignNote]:
|
|
||||||
# Load file and parse it by minidom
|
|
||||||
self.loadSource(self.file)
|
|
||||||
|
|
||||||
# Migrating content / time consuming part
|
|
||||||
# addItemToCards is called for each sm element
|
|
||||||
self.logger("Parsing started.")
|
|
||||||
self.parse()
|
|
||||||
self.logger("Parsing done.")
|
|
||||||
|
|
||||||
# Return imported cards
|
|
||||||
self.total = len(self.notes)
|
|
||||||
self.log.append("%d cards imported." % self.total)
|
|
||||||
return self.notes
|
|
||||||
|
|
||||||
def fields(self) -> int:
|
|
||||||
return 2
|
|
||||||
|
|
||||||
## PARSER METHODS
|
|
||||||
|
|
||||||
def addItemToCards(self, item: SuperMemoElement) -> None:
|
|
||||||
"This method actually do conversion"
|
|
||||||
|
|
||||||
# new anki card
|
|
||||||
note = ForeignNote()
|
|
||||||
|
|
||||||
# clean Q and A
|
|
||||||
note.fields.append(self._fudgeText(self._decode_htmlescapes(item.Question)))
|
|
||||||
note.fields.append(self._fudgeText(self._decode_htmlescapes(item.Answer)))
|
|
||||||
note.tags = []
|
|
||||||
|
|
||||||
# pre-process scheduling data
|
|
||||||
# convert learning data
|
|
||||||
if (
|
|
||||||
not self.META.resetLearningData
|
|
||||||
and int(item.Interval) >= 1
|
|
||||||
and getattr(item, "LastRepetition", None)
|
|
||||||
):
|
|
||||||
# migration of LearningData algorithm
|
|
||||||
tLastrep = time.mktime(time.strptime(item.LastRepetition, "%d.%m.%Y"))
|
|
||||||
tToday = time.time()
|
|
||||||
card = ForeignCard()
|
|
||||||
card.ivl = int(item.Interval)
|
|
||||||
card.lapses = int(item.Lapses)
|
|
||||||
card.reps = int(item.Repetitions) + int(item.Lapses)
|
|
||||||
nextDue = tLastrep + (float(item.Interval) * 86400.0)
|
|
||||||
remDays = int((nextDue - time.time()) / 86400)
|
|
||||||
card.due = self.col.sched.today + remDays
|
|
||||||
card.factor = int(
|
|
||||||
self._afactor2efactor(float(item.AFactor.replace(",", "."))) * 1000
|
|
||||||
)
|
|
||||||
note.cards[0] = card
|
|
||||||
|
|
||||||
# categories & tags
|
|
||||||
# it's worth to have every theme (tree structure of sm collection) stored in tags, but sometimes not
|
|
||||||
# you can deceide if you are going to tag all toppics or just that containing some pattern
|
|
||||||
tTaggTitle = False
|
|
||||||
for pattern in self.META.pathsToBeTagged:
|
|
||||||
if (
|
|
||||||
item.lTitle is not None
|
|
||||||
and pattern.lower() in " ".join(item.lTitle).lower()
|
|
||||||
):
|
|
||||||
tTaggTitle = True
|
|
||||||
break
|
|
||||||
if tTaggTitle or self.META.tagAllTopics:
|
|
||||||
# normalize - remove diacritic punctuation from unicode chars to ascii
|
|
||||||
item.lTitle = [self._unicode2ascii(topic) for topic in item.lTitle]
|
|
||||||
|
|
||||||
# Transform xyz / aaa / bbb / ccc on Title path to Tag xyzAaaBbbCcc
|
|
||||||
# clean things like [999] or [111-2222] from title path, example: xyz / [1000-1200] zyx / xyz
|
|
||||||
# clean whitespaces
|
|
||||||
# set Capital letters for first char of the word
|
|
||||||
tmp = list(
|
|
||||||
{re.sub(r"(\[[0-9]+\])", " ", i).replace("_", " ") for i in item.lTitle}
|
|
||||||
)
|
|
||||||
tmp = list({re.sub(r"(\W)", " ", i) for i in tmp})
|
|
||||||
tmp = list({re.sub("^[0-9 ]+$", "", i) for i in tmp})
|
|
||||||
tmp = list({capwords(i).replace(" ", "") for i in tmp})
|
|
||||||
tags = [j[0].lower() + j[1:] for j in tmp if j.strip() != ""]
|
|
||||||
|
|
||||||
note.tags += tags
|
|
||||||
|
|
||||||
if self.META.tagMemorizedItems and int(item.Interval) > 0:
|
|
||||||
note.tags.append("Memorized")
|
|
||||||
|
|
||||||
self.logger("Element tags\t- " + repr(note.tags), level=3)
|
|
||||||
|
|
||||||
self.notes.append(note)
|
|
||||||
|
|
||||||
def logger(self, text: str, level: int = 1) -> None:
|
|
||||||
"Wrapper for Anki logger"
|
|
||||||
|
|
||||||
dLevels = {0: "", 1: "Info", 2: "Verbose", 3: "Debug"}
|
|
||||||
if level <= self.META.loggerLevel:
|
|
||||||
# self.deck.updateProgress(_(text))
|
|
||||||
|
|
||||||
if self.META.logToStdOutput:
|
|
||||||
print(
|
|
||||||
self.__class__.__name__
|
|
||||||
+ " - "
|
|
||||||
+ dLevels[level].ljust(9)
|
|
||||||
+ " -\t"
|
|
||||||
+ text
|
|
||||||
)
|
|
||||||
|
|
||||||
# OPEN AND LOAD
|
|
||||||
def openAnything(self, source):
|
|
||||||
"""Open any source / actually only opening of files is used
|
|
||||||
@return an open handle which must be closed after use, i.e., handle.close()"""
|
|
||||||
|
|
||||||
if source == "-":
|
|
||||||
return sys.stdin
|
|
||||||
|
|
||||||
# try to open with urllib (if source is http, ftp, or file URL)
|
|
||||||
import urllib.error
|
|
||||||
import urllib.parse
|
|
||||||
import urllib.request
|
|
||||||
|
|
||||||
try:
|
|
||||||
return urllib.request.urlopen(source)
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# try to open with native open function (if source is pathname)
|
|
||||||
try:
|
|
||||||
return open(source, encoding="utf8")
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# treat source as string
|
|
||||||
import io
|
|
||||||
|
|
||||||
return io.StringIO(str(source))
|
|
||||||
|
|
||||||
def loadSource(self, source: str) -> None:
|
|
||||||
"""Load source file and parse with xml.dom.minidom"""
|
|
||||||
self.source = source
|
|
||||||
self.logger("Load started...")
|
|
||||||
sock = open(self.source, encoding="utf8")
|
|
||||||
self.xmldoc = minidom.parse(sock).documentElement
|
|
||||||
sock.close()
|
|
||||||
self.logger("Load done.")
|
|
||||||
|
|
||||||
# PARSE
|
|
||||||
def parse(self, node: Text | Element | None = None) -> None:
|
|
||||||
"Parse method - parses document elements"
|
|
||||||
|
|
||||||
if node is None and self.xmldoc is not None:
|
|
||||||
node = self.xmldoc
|
|
||||||
|
|
||||||
_method = "parse_%s" % node.__class__.__name__
|
|
||||||
if hasattr(self, _method):
|
|
||||||
parseMethod = getattr(self, _method)
|
|
||||||
parseMethod(node)
|
|
||||||
else:
|
|
||||||
self.logger("No handler for method %s" % _method, level=3)
|
|
||||||
|
|
||||||
def parse_Document(self, node):
|
|
||||||
"Parse XML document"
|
|
||||||
|
|
||||||
self.parse(node.documentElement)
|
|
||||||
|
|
||||||
def parse_Element(self, node: Element) -> None:
|
|
||||||
"Parse XML element"
|
|
||||||
|
|
||||||
_method = "do_%s" % node.tagName
|
|
||||||
if hasattr(self, _method):
|
|
||||||
handlerMethod = getattr(self, _method)
|
|
||||||
handlerMethod(node)
|
|
||||||
else:
|
|
||||||
self.logger("No handler for method %s" % _method, level=3)
|
|
||||||
# print traceback.print_exc()
|
|
||||||
|
|
||||||
def parse_Text(self, node: Text) -> None:
|
|
||||||
"Parse text inside elements. Text is stored into local buffer."
|
|
||||||
|
|
||||||
text = node.data
|
|
||||||
self.cntBuf.append(text)
|
|
||||||
|
|
||||||
# def parse_Comment(self, node):
|
|
||||||
# """
|
|
||||||
# Source can contain XML comments, but we ignore them
|
|
||||||
# """
|
|
||||||
# pass
|
|
||||||
|
|
||||||
    # DO
    def do_SuperMemoCollection(self, node: Element) -> None:
        "Process SM Collection"

        for child in node.childNodes:
            self.parse(child)

    def do_SuperMemoElement(self, node: Element) -> None:
        "Process SM Element (Type - Title,Topics)"

        self.logger("=" * 45, level=3)

        self.cntElm.append(SuperMemoElement())
        self.cntElm[-1]["lTitle"] = self.cntMeta["title"]

        # parse all child elements
        for child in node.childNodes:
            self.parse(child)

        # strip all saved strings, just to be sure
        for key in list(self.cntElm[-1].keys()):
            if hasattr(self.cntElm[-1][key], "strip"):
                self.cntElm[-1][key] = self.cntElm[-1][key].strip()

        # pop current element
        smel = self.cntElm.pop()

        # process cntElm if it is a valid item (and not a topic etc.)
        # if smel.Lapses != None and smel.Interval != None and smel.Question != None and smel.Answer != None:
        if smel.Title is None and smel.Question is not None and smel.Answer is not None:
            if smel.Answer.strip() != "" and smel.Question.strip() != "":
                # migrate only memorized items, otherwise skip/continue
                if self.META.onlyMemorizedItems and not (int(smel.Interval) > 0):
                    self.logger("Element skipped \t- not memorized ...", level=3)
                else:
                    # import sm element data to Anki
                    self.addItemToCards(smel)
                    self.logger("Import element \t- " + smel["Question"], level=3)

                    # print element
                    self.logger("-" * 45, level=3)
                    for key in list(smel.keys()):
                        self.logger(
                            "\t{} {}".format((key + ":").ljust(15), smel[key]), level=3
                        )
            else:
                self.logger("Element skipped \t- no valid Q and A ...", level=3)

        else:
            # now we know the item was a topic,
            # and parsing of the whole node is finished

            # test if it's really a topic
            if smel.Title is not None:
                # remove topic from title list
                t = self.cntMeta["title"].pop()
                self.logger("End of topic \t- %s" % (t), level=2)
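The do_Content and do_LearningData handlers below copy child tags such as Question, Answer and Interval into the current element by tag name. For orientation, the input they walk looks roughly like this (a hedged reconstruction: the tag names come from the handlers above and below, the nesting and values are invented):

sample = """\
<SuperMemoCollection>
  <SuperMemoElement>
    <Title>English</Title>
    <SuperMemoElement>
      <Type>Item</Type>
      <Content>
        <Question>capital of France?</Question>
        <Answer>Paris</Answer>
      </Content>
      <LearningData>
        <Interval>21</Interval>
        <Lapses>0</Lapses>
      </LearningData>
    </SuperMemoElement>
  </SuperMemoElement>
</SuperMemoCollection>
"""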
    def do_Content(self, node: Element) -> None:
        "Process SM element Content"

        for child in node.childNodes:
            if hasattr(child, "tagName") and child.firstChild is not None:
                self.cntElm[-1][child.tagName] = child.firstChild.data

    def do_LearningData(self, node: Element) -> None:
        "Process SM element LearningData"

        for child in node.childNodes:
            if hasattr(child, "tagName") and child.firstChild is not None:
                self.cntElm[-1][child.tagName] = child.firstChild.data

    # It's being processed in do_Content now
    # def do_Question(self, node):
    #     for child in node.childNodes: self.parse(child)
    #     self.cntElm[-1][node.tagName]=self.cntBuf.pop()

    # It's being processed in do_Content now
    # def do_Answer(self, node):
    #     for child in node.childNodes: self.parse(child)
    #     self.cntElm[-1][node.tagName]=self.cntBuf.pop()

    def do_Title(self, node: Element) -> None:
        "Process SM element Title"

        t = self._decode_htmlescapes(node.firstChild.data)
        self.cntElm[-1][node.tagName] = t
        self.cntMeta["title"].append(t)
        self.cntElm[-1]["lTitle"] = self.cntMeta["title"]
        self.logger("Start of topic \t- " + " / ".join(self.cntMeta["title"]), level=2)

    def do_Type(self, node: Element) -> None:
        "Process SM element Type"

        if len(self.cntBuf) >= 1:
            self.cntElm[-1][node.tagName] = self.cntBuf.pop()
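do_Title pushes each decoded title onto cntMeta["title"], and do_SuperMemoElement pops it again when the topic's node is finished, so every imported item records the full topic path in lTitle. A toy sketch of that bookkeeping (illustrative names, not importer code):

title_stack: list[str] = []  # plays the role of cntMeta["title"]

def start_topic(title: str) -> None:  # what do_Title does
    title_stack.append(title)

def end_topic() -> None:  # what do_SuperMemoElement does for topics
    title_stack.pop()

start_topic("English")
start_topic("Irregular verbs")
print(" / ".join(title_stack))  # English / Irregular verbs, an item's lTitle path
end_topic()
end_topic()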
# if __name__ == '__main__':

#     # for testing, you can run it standalone

#     file = u'/home/epcim/hg2g/dev/python/sm2anki/ADVENG2EXP.xxe.esc.zaloha_FINAL.xml'
#     file = u'/home/epcim/hg2g/dev/python/anki/libanki/tests/importing/supermemo/original_ENGLISHFORBEGGINERS_noOEM.xml'
#     file = u'/home/epcim/hg2g/dev/python/anki/libanki/tests/importing/supermemo/original_ENGLISHFORBEGGINERS_oem_1250.xml'
#     file = str(sys.argv[1])
#     impo = SupermemoXmlImporter(Deck(), file)
#     impo.foreignCards()

#     sys.exit(1)

# vim: ts=4 sts=2 ft=python
@@ -18,7 +18,7 @@ from anki._legacy import DeprecatedNamesMixinForModule
 TR = anki._fluent.LegacyTranslationEnum
 FormatTimeSpan = _pb.FormatTimespanRequest
 
 # When adding new languages here, check lang_to_disk_lang() below
 langs = sorted(
     [
         ("Afrikaans", "af_ZA"),
@@ -38,6 +38,7 @@ langs = sorted(
         ("Italiano", "it_IT"),
         ("lo jbobau", "jbo_EN"),
         ("Lenga d'òc", "oc_FR"),
+        ("Қазақша", "kk_KZ"),
         ("Magyar", "hu_HU"),
         ("Nederlands", "nl_NL"),
         ("Norsk", "nb_NO"),
@@ -64,6 +65,7 @@ langs = sorted(
         ("Українська мова", "uk_UA"),
         ("Հայերեն", "hy_AM"),
         ("עִבְרִית", "he_IL"),
+        ("ייִדיש", "yi"),
         ("العربية", "ar_SA"),
         ("فارسی", "fa_IR"),
         ("ภาษาไทย", "th_TH"),
@@ -73,6 +75,7 @@ langs = sorted(
         ("ଓଡ଼ିଆ", "or_OR"),
         ("Filipino", "tl"),
         ("ئۇيغۇر", "ug"),
+        ("Oʻzbekcha", "uz_UZ"),
     ]
 )
 
@@ -103,6 +106,7 @@ compatMap = {
     "it": "it_IT",
     "ja": "ja_JP",
     "jbo": "jbo_EN",
+    "kk": "kk_KZ",
     "ko": "ko_KR",
     "la": "la_LA",
     "mn": "mn_MN",
@@ -123,7 +127,9 @@ compatMap = {
     "th": "th_TH",
     "tr": "tr_TR",
     "uk": "uk_UA",
+    "uz": "uz_UZ",
     "vi": "vi_VN",
+    "yi": "yi",
 }
 
 
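compatMap upgrades the bare two-letter codes stored by very old clients to the full locale codes used today; the new "kk", "uz" and "yi" entries extend that table. The lookup amounts to (a sketch, not the exact call site in lang.py):

def upgrade_stored_lang(code: str) -> str:
    # e.g. "uz" -> "uz_UZ"; full codes and unknown values pass through unchanged
    return compatMap.get(code, code)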
@@ -157,13 +163,13 @@ def lang_to_disk_lang(lang: str) -> str:
 
 
 # the currently set interface language
-current_lang = "en"  # pylint: disable=invalid-name
+current_lang = "en"
 
 # the current Fluent translation instance. Code in pylib/ should
 # not reference this, and should use col.tr instead. The global
 # instance exists for legacy reasons, and as a convenience for the
 # Qt code.
-current_i18n: anki._backend.RustBackend | None = None  # pylint: disable=invalid-name
+current_i18n: anki._backend.RustBackend | None = None
 tr_legacyglobal = anki._backend.Translations(None)
 
 
@@ -178,7 +184,7 @@ def ngettext(single: str, plural: str, num: int) -> str:
 
 
 def set_lang(lang: str) -> None:
-    global current_lang, current_i18n  # pylint: disable=invalid-name
+    global current_lang, current_i18n
     current_lang = lang
     current_i18n = anki._backend.RustBackend(langs=[lang])
     tr_legacyglobal.backend = weakref.ref(current_i18n)
|
||||||
|
|
||||||
|
|
||||||
def is_rtl(lang: str) -> bool:
|
def is_rtl(lang: str) -> bool:
|
||||||
return lang in ("he", "ar", "fa", "ug")
|
return lang in ("he", "ar", "fa", "ug", "yi")
|
||||||
|
|
||||||
|
|
||||||
# strip off unicode isolation markers from a translated string
|
# strip off unicode isolation markers from a translated string
|
||||||
|
|
|
@@ -10,7 +10,7 @@ import time
 from collections.abc import Sequence
 from typing import Any, NewType, Union
 
-import anki  # pylint: disable=unused-import
+import anki
 import anki.collection
 import anki.notes
 from anki import notetypes_pb2
@@ -419,7 +419,7 @@ and notes.mid = ? and cards.ord = ?""",
 
     # legacy API - used by unit tests and add-ons
 
-    def change(  # pylint: disable=invalid-name
+    def change(
         self,
         notetype: NotetypeDict,
         nids: list[anki.notes.NoteId],
@@ -478,8 +478,6 @@ and notes.mid = ? and cards.ord = ?""",
     # Legacy
     ##########################################################################
 
-    # pylint: disable=invalid-name
-
     @deprecated(info="use note.cloze_numbers_in_fields()")
     def _availClozeOrds(
         self, notetype: NotetypeDict, flds: str, allow_empty: bool = True
@@ -7,7 +7,7 @@ import copy
 from collections.abc import Sequence
 from typing import NewType
 
-import anki  # pylint: disable=unused-import
+import anki
 import anki.cards
 import anki.collection
 import anki.decks
@@ -4,10 +4,8 @@
 # The backend code has moved into _backend; this file exists only to avoid breaking
 # some add-ons. They should be updated to point to the correct location in the
 # future.
-#
-# pylint: disable=unused-import
-# pylint: enable=invalid-name
 
+# ruff: noqa: F401
 from anki.decks import DeckTreeNode
 from anki.errors import InvalidInput, NotFoundError
 from anki.lang import TR
@@ -42,6 +42,7 @@ from anki.utils import ids2str, int_time
 
 class SchedulerBase(DeprecatedNamesMixin):
     "Actions shared between schedulers."
+
     version = 0
 
     def __init__(self, col: anki.collection.Collection) -> None:
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 from __future__ import annotations
 
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 from __future__ import annotations
 
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=invalid-name
 
 """
 The V3/2021 scheduler.
@@ -184,7 +183,7 @@ class Scheduler(SchedulerBaseWithLegacy):
             return self._interval_for_filtered_state(state.filtered)
         else:
             assert_exhaustive(kind)
-            return 0  # pylint: disable=unreachable
+            return 0
 
     def _interval_for_normal_state(
         self, normal: scheduler_pb2.SchedulingState.Normal
@@ -200,7 +199,7 @@ class Scheduler(SchedulerBaseWithLegacy):
             return normal.relearning.learning.scheduled_secs
         else:
             assert_exhaustive(kind)
-            return 0  # pylint: disable=unreachable
+            return 0
 
     def _interval_for_filtered_state(
         self, filtered: scheduler_pb2.SchedulingState.Filtered
@@ -212,7 +211,7 @@ class Scheduler(SchedulerBaseWithLegacy):
             return self._interval_for_normal_state(filtered.rescheduling.original_state)
         else:
             assert_exhaustive(kind)
-            return 0  # pylint: disable=unreachable
+            return 0
 
     def nextIvl(self, card: Card, ease: int) -> Any:
         "Don't use this - it is only required by tests, and will be moved in the future."
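Each of these hunks drops a "# pylint: disable=unreachable" guarding the return 0 after assert_exhaustive. The idiom is an exhaustive-match guard: the type checker proves the final branch unreachable, while the assert catches any future state kind at runtime. A minimal self-contained sketch (the Kind type and interval values are illustrative, not the scheduler's own):

from typing import Literal, NoReturn

Kind = Literal["learning", "review"]

def assert_exhaustive(value: NoReturn) -> NoReturn:
    # Reached only if a case above was forgotten.
    raise AssertionError(f"unhandled case: {value!r}")

def interval_for(kind: Kind) -> int:
    if kind == "learning":
        return 60
    if kind == "review":
        return 86_400
    assert_exhaustive(kind)  # mypy narrows kind to Never here
    return 0  # unreachable, but keeps the function total for the checker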
@@ -1,7 +1,6 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-# pylint: disable=C
 
 from __future__ import annotations
 
@@ -27,7 +26,7 @@ def _legacy_card_stats(
     col: anki.collection.Collection, card_id: anki.cards.CardId, include_revlog: bool
 ) -> str:
     "A quick hack to preserve compatibility with the old HTML string API."
-    random_id = f"cardinfo-{base62(random.randint(0, 2 ** 64 - 1))}"
+    random_id = f"cardinfo-{base62(random.randint(0, 2**64 - 1))}"
     varName = random_id.replace("-", "")
     return f"""
     <div id="{random_id}"></div>
@@ -174,7 +173,7 @@ from revlog where type != {REVLOG_RESCHED} and id > ? """
             cards=cards, seconds=float(thetime)
         )
         # again/pass count
-        b += "<br>" + "Again count: %s" % bold(failed)
+        b += "<br>" + "Again count: %s" % bold(str(failed))
         if cards:
             b += " " + "(%s correct)" % bold(
                 "%0.1f%%" % ((1 - failed / float(cards)) * 100)
@@ -182,7 +181,10 @@ from revlog where type != {REVLOG_RESCHED} and id > ? """
         # type breakdown
         b += "<br>"
         b += "Learn: %(a)s, Review: %(b)s, Relearn: %(c)s, Filtered: %(d)s" % dict(
-            a=bold(lrn), b=bold(rev), c=bold(relrn), d=bold(filt)
+            a=bold(str(lrn)),
+            b=bold(str(rev)),
+            c=bold(str(relrn)),
+            d=bold(str(filt)),
         )
         # mature today
         mcnt, msum = self.col.db.first(
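The str() wrappers in the last two hunks are needed because the counts come back from the revlog query as ints, while the bold helper expects a string. A sketch of the shape involved (this bold is illustrative, not the stats module's own helper):

def bold(s: str) -> str:
    return f"<b>{s}</b>"

failed = 3  # int straight from the database
line = "Again count: %s" % bold(str(failed))  # str() satisfies the annotation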
@@ -321,7 +323,6 @@ group by day order by day"""
             yaxes=[dict(min=0), dict(position="right", min=0)],
         )
         if days is not None:
-            # pylint: disable=invalid-unary-operand-type
             conf["xaxis"]["min"] = -days + 0.5
 
         def plot(id: str, data: Any, ylabel: str, ylabel2: str) -> str:
@@ -356,7 +357,6 @@ group by day order by day"""
             yaxes=[dict(min=0), dict(position="right", min=0)],
         )
         if days is not None:
-            # pylint: disable=invalid-unary-operand-type
             conf["xaxis"]["min"] = -days + 0.5
 
         def plot(id: str, data: Any, ylabel: str, ylabel2: str) -> str:
@@ -1,5 +1,3 @@
-# pylint: disable=invalid-name
-
 # from subtlepatterns.com; CC BY 4.0.
 # by Daniel Beaton
 # https://www.toptal.com/designers/subtlepatterns/fancy-deboss/
@@ -12,7 +12,6 @@ from anki import notetypes_pb2
 from anki._legacy import DeprecatedNamesMixinForModule
 from anki.utils import from_json_bytes
 
-# pylint: disable=no-member
 StockNotetypeKind = notetypes_pb2.StockNotetype.Kind
 
 # add-on authors can add ("note type name", function)
@@ -16,7 +16,7 @@ import re
 from collections.abc import Collection, Sequence
 from typing import Match
 
-import anki  # pylint: disable=unused-import
+import anki
 import anki.collection
 from anki import tags_pb2
 from anki._legacy import DeprecatedNamesMixin, deprecated
@@ -279,6 +279,7 @@ class TemplateRenderContext:
 @dataclass
 class TemplateRenderOutput:
     "Stores the rendered templates and extracted AV tags."
+
     question_text: str
     answer_text: str
     question_av_tags: list[AVTag]
@@ -24,7 +24,6 @@ from anki.dbproxy import DBProxy
 _tmpdir: str | None
 
 try:
-    # pylint: disable=c-extension-no-member
     import orjson
 
     to_json_bytes: Callable[[Any], bytes] = orjson.dumps
@@ -156,12 +155,12 @@ def field_checksum(data: str) -> int:
 # Temp files
 ##############################################################################
 
-_tmpdir = None  # pylint: disable=invalid-name
+_tmpdir = None
 
 
 def tmpdir() -> str:
     "A reusable temp folder which we clean out on each program invocation."
-    global _tmpdir  # pylint: disable=invalid-name
+    global _tmpdir
     if not _tmpdir:
 
         def cleanup() -> None:
@@ -216,7 +215,6 @@ def call(argv: list[str], wait: bool = True, **kwargs: Any) -> int:
         try:
             info.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore
         except Exception:
-            # pylint: disable=no-member
             info.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW  # type: ignore
     else:
         info = None
|
@ -282,7 +280,7 @@ def plat_desc() -> str:
|
||||||
elif is_win:
|
elif is_win:
|
||||||
theos = f"win:{platform.win32_ver()[0]}"
|
theos = f"win:{platform.win32_ver()[0]}"
|
||||||
elif system == "Linux":
|
elif system == "Linux":
|
||||||
import distro # pytype: disable=import-error # pylint: disable=import-error
|
import distro # pytype: disable=import-error
|
||||||
|
|
||||||
dist_id = distro.id()
|
dist_id = distro.id()
|
||||||
dist_version = distro.version()
|
dist_version = distro.version()
|
||||||
|
@ -309,12 +307,17 @@ def int_version() -> int:
|
||||||
"""Anki's version as an integer in the form YYMMPP, e.g. 230900.
|
"""Anki's version as an integer in the form YYMMPP, e.g. 230900.
|
||||||
(year, month, patch).
|
(year, month, patch).
|
||||||
In 2.1.x releases, this was just the last number."""
|
In 2.1.x releases, this was just the last number."""
|
||||||
|
import re
|
||||||
|
|
||||||
from anki.buildinfo import version
|
from anki.buildinfo import version
|
||||||
|
|
||||||
|
# Strip non-numeric characters (handles beta/rc suffixes like '25.02b1' or 'rc3')
|
||||||
|
numeric_version = re.sub(r"[^0-9.]", "", version)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
[year, month, patch] = version.split(".")
|
[year, month, patch] = numeric_version.split(".")
|
||||||
except ValueError:
|
except ValueError:
|
||||||
[year, month] = version.split(".")
|
[year, month] = numeric_version.split(".")
|
||||||
patch = "0"
|
patch = "0"
|
||||||
|
|
||||||
year_num = int(year)
|
year_num = int(year)
|
||||||
|
|
|
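The re.sub pass is what keeps int_version from raising on suffixed builds; worked by hand from the code above (note that digits inside a suffix survive the strip and fold into the adjacent field, which is harmless for the crash this change avoids):

import re

for version in ("24.11", "25.07.5", "25.02b1", "25.06rc3"):
    print(version, "->", re.sub(r"[^0-9.]", "", version))
# 24.11    -> 24.11     (year 24, month 11, patch defaults to "0")
# 25.07.5  -> 25.07.5   (year 25, month 07, patch 5)
# 25.02b1  -> 25.021    (two fields, so the except branch sets patch to "0")
# 25.06rc3 -> 25.063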
@@ -35,8 +35,16 @@ class CustomBuildHook(BuildHookInterface):
 
         assert generated_root.exists(), "you should build with --wheel"
         for path in generated_root.rglob("*"):
-            if path.is_file():
+            if path.is_file() and not self._should_exclude(path):
                 relative_path = path.relative_to(generated_root)
                 # Place files under anki/ in the distribution
                 dist_path = "anki" / relative_path
                 force_include[str(path)] = str(dist_path)
+
+    def _should_exclude(self, path: Path) -> bool:
+        """Check if a file should be excluded from the wheel."""
+        # Exclude __pycache__
+        path_str = str(path)
+        if "/__pycache__/" in path_str:
+            return True
+        return False
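The new hook is consulted once per file before it is force-included, and it tests the string form of the path, so it assumes POSIX separators. A quick sketch (the paths are illustrative):

from pathlib import Path

def should_exclude(path: Path) -> bool:
    # mirrors CustomBuildHook._should_exclude above
    return "/__pycache__/" in str(path)

print(should_exclude(Path("out/anki/__pycache__/lang.cpython-39.pyc")))  # True
print(should_exclude(Path("out/anki/lang.py")))  # False

On Windows, str(Path(...)) uses backslashes, so a separator-neutral check such as "__pycache__" in path.parts would be the more portable spelling if the build ever runs there.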
@@ -1,23 +1,18 @@
 [project]
 name = "anki"
-# dynamic = ["version"]
-version = "0.1.2"
+dynamic = ["version"]
 requires-python = ">=3.9"
 license = "AGPL-3.0-or-later"
 dependencies = [
-    "beautifulsoup4",
     "decorator",
     "markdown",
     "orjson",
-    "protobuf>=4.21",
+    "protobuf>=6.0,<8.0",
     "requests[socks]",
+    # remove after we update to min python 3.11+
     "typing_extensions",
-    "types-protobuf",
-    "types-requests",
-    "types-orjson",
     # platform-specific dependencies
     "distro; sys_platform != 'darwin' and sys_platform != 'win32'",
-    "psutil; sys_platform == 'win32'",
 ]
 
 [build-system]