Mirror of https://github.com/ankitects/anki.git
Migrate from slog to tracing
The Rust community appears to have converged on tracing: it is used by the Rust compiler, and it receives close to 10x the downloads that slog does. Its API is more ergonomic, and it does a much nicer job with async Rust. To make this change, we no longer pass explicit loggers around, and instead rely on a globally-registered one. The log file location has been changed from one file in each profile folder to a single file in the base folder. That file will remain empty for most users, since only errors are logged by default, but it may be useful for debugging future changes.
This commit is contained in:
parent 687647af24
commit 0570cfdf48
30 changed files with 434 additions and 598 deletions
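Before the diff itself, here is a minimal sketch of the pattern the commit adopts throughout: instead of threading a Logger handle through every constructor, logging is initialized once per process and the macros find the global subscriber themselves. The tracing / tracing-subscriber calls below are real crate APIs, but the function names, messages, and paths are illustrative only, not taken from this commit.

// Sketch of the new pattern (not Anki's actual code).
// Assumed Cargo deps: tracing = "0.1",
// tracing-subscriber = { version = "0.3", features = ["env-filter"] }.
use tracing::{debug, error};
use tracing_subscriber::EnvFilter;

fn open_collection(path: &str) -> Result<(), std::io::Error> {
    // No &Logger parameter any more: this logs through the
    // globally registered subscriber.
    debug!(path, "opening collection");
    Ok(())
}

fn main() {
    // Register the process-wide subscriber exactly once at startup.
    // RUST_LOG (e.g. RUST_LOG=debug) controls what gets emitted.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .init();

    if let Err(e) = open_collection("collection.anki2") {
        error!("open failed: {:?}", e);
    }
}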
@@ -11,9 +11,6 @@ unmaintained = "warn"
 yanked = "warn"
 notice = "warn"
 ignore = [
-    # atty: we don't use a custom allocator on Windows, and our deps haven't
-    # updated to alternatives yet
-    "RUSTSEC-2021-0145",
     # xml-rs via apple-bundles: not used for untrusted input
     "RUSTSEC-2022-0048",
 ]
Cargo.lock (generated, 226 lines changed)

@@ -125,15 +125,14 @@ dependencies = [
  "serde_repr",
  "serde_tuple",
  "sha1",
- "slog",
- "slog-async",
- "slog-envlogger",
- "slog-term",
  "snafu",
  "strum",
  "tempfile",
  "tokio",
  "tokio-util",
+ "tracing",
+ "tracing-appender",
+ "tracing-subscriber",
  "unic-ucd-category",
  "unicase",
  "unicode-normalization",

@@ -191,12 +190,6 @@ dependencies = [
  "walkdir",
 ]
 
-[[package]]
-name = "arc-swap"
-version = "1.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
-
 [[package]]
 name = "archives"
 version = "0.0.0"

@@ -840,27 +833,6 @@ dependencies = [
  "subtle",
 ]
 
-[[package]]
-name = "dirs-next"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
-dependencies = [
- "cfg-if",
- "dirs-sys-next",
-]
-
-[[package]]
-name = "dirs-sys-next"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
-dependencies = [
- "libc",
- "redox_users",
- "winapi",
-]
-
 [[package]]
 name = "displaydoc"
 version = "0.2.3"

@@ -1897,6 +1869,15 @@ dependencies = [
  "tendril",
 ]
 
+[[package]]
+name = "matchers"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+dependencies = [
+ "regex-automata",
+]
+
 [[package]]
 name = "matches"
 version = "0.1.9"

@@ -2040,6 +2021,16 @@ dependencies = [
  "minimal-lexical",
 ]
 
+[[package]]
+name = "nu-ansi-term"
+version = "0.46.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
+dependencies = [
+ "overload",
+ "winapi",
+]
+
 [[package]]
 name = "num-format"
 version = "0.4.4"

@@ -2197,6 +2188,12 @@ version = "6.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
 
+[[package]]
+name = "overload"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
+
 [[package]]
 name = "p12"
 version = "0.6.3"

@@ -2853,17 +2850,6 @@ dependencies = [
  "bitflags",
 ]
 
-[[package]]
-name = "redox_users"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b"
-dependencies = [
- "getrandom 0.2.8",
- "redox_syscall",
- "thiserror",
-]
-
 [[package]]
 name = "regex"
 version = "1.7.0"

@@ -2880,6 +2866,9 @@ name = "regex-automata"
 version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+dependencies = [
+ "regex-syntax",
+]
 
 [[package]]
 name = "regex-syntax"

@@ -3279,6 +3268,15 @@ dependencies = [
  "digest",
 ]
 
+[[package]]
+name = "sharded-slab"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31"
+dependencies = [
+ "lazy_static",
+]
+
 [[package]]
 name = "shared_child"
 version = "1.0.0"

@@ -3319,74 +3317,6 @@ dependencies = [
  "autocfg",
 ]
 
-[[package]]
-name = "slog"
-version = "2.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06"
-
-[[package]]
-name = "slog-async"
-version = "2.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "766c59b252e62a34651412870ff55d8c4e6d04df19b43eecb2703e417b097ffe"
-dependencies = [
- "crossbeam-channel",
- "slog",
- "take_mut",
- "thread_local",
-]
-
-[[package]]
-name = "slog-envlogger"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "906a1a0bc43fed692df4b82a5e2fbfc3733db8dad8bb514ab27a4f23ad04f5c0"
-dependencies = [
- "log",
- "regex",
- "slog",
- "slog-async",
- "slog-scope",
- "slog-stdlog",
- "slog-term",
-]
-
-[[package]]
-name = "slog-scope"
-version = "4.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f95a4b4c3274cd2869549da82b57ccc930859bdbf5bcea0424bc5f140b3c786"
-dependencies = [
- "arc-swap",
- "lazy_static",
- "slog",
-]
-
-[[package]]
-name = "slog-stdlog"
-version = "4.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6706b2ace5bbae7291d3f8d2473e2bfab073ccd7d03670946197aec98471fa3e"
-dependencies = [
- "log",
- "slog",
- "slog-scope",
-]
-
-[[package]]
-name = "slog-term"
-version = "2.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c"
-dependencies = [
- "atty",
- "slog",
- "term",
- "thread_local",
- "time",
-]
-
 [[package]]
 name = "smallvec"
 version = "1.10.0"

@@ -3515,12 +3445,6 @@ dependencies = [
  "unicode-ident",
 ]
 
-[[package]]
-name = "take_mut"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
-
 [[package]]
 name = "tar"
 version = "0.4.38"

@@ -3563,17 +3487,6 @@ dependencies = [
  "utf-8",
 ]
 
-[[package]]
-name = "term"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f"
-dependencies = [
- "dirs-next",
- "rustversion",
- "winapi",
-]
-
 [[package]]
 name = "termcolor"
 version = "1.1.3"

@@ -3797,9 +3710,32 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8"
 dependencies = [
  "cfg-if",
  "pin-project-lite",
+ "tracing-attributes",
  "tracing-core",
 ]
 
+[[package]]
+name = "tracing-appender"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
+dependencies = [
+ "crossbeam-channel",
+ "time",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "tracing-attributes"
+version = "0.1.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "tracing-core"
 version = "0.1.30"

@@ -3807,6 +3743,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a"
 dependencies = [
  "once_cell",
+ "valuable",
 ]
 
+[[package]]
+name = "tracing-log"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922"
+dependencies = [
+ "lazy_static",
+ "log",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-subscriber"
+version = "0.3.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70"
+dependencies = [
+ "matchers",
+ "nu-ansi-term",
+ "once_cell",
+ "regex",
+ "sharded-slab",
+ "smallvec",
+ "thread_local",
+ "tracing",
+ "tracing-core",
+ "tracing-log",
+]
+
 [[package]]

@@ -4049,6 +4015,12 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "valuable"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
+
 [[package]]
 name = "vcpkg"
 version = "0.2.15"
@@ -89,15 +89,6 @@
  "license_file": null,
  "description": "Flexible concrete Error type built on std::error::Error"
 },
-{
- "name": "arc-swap",
- "version": "1.5.1",
- "authors": "Michal 'vorner' Vaner <vorner@vorner.cz>",
- "repository": "https://github.com/vorner/arc-swap",
- "license": "Apache-2.0 OR MIT",
- "license_file": null,
- "description": "Atomically swappable Arc"
-},
 {
  "name": "arrayref",
  "version": "0.3.6",

@@ -134,15 +125,6 @@
  "license_file": null,
  "description": "Type erasure for async trait methods"
 },
-{
- "name": "atty",
- "version": "0.2.14",
- "authors": "softprops <d.tangren@gmail.com>",
- "repository": "https://github.com/softprops/atty",
- "license": "MIT",
- "license_file": null,
- "description": "A simple interface for querying atty"
-},
 {
  "name": "autocfg",
  "version": "1.1.0",

@@ -467,24 +449,6 @@
  "license_file": null,
  "description": "Traits for cryptographic hash functions and message authentication codes"
 },
-{
- "name": "dirs-next",
- "version": "2.0.0",
- "authors": "The @xdg-rs members",
- "repository": "https://github.com/xdg-rs/dirs",
- "license": "Apache-2.0 OR MIT",
- "license_file": null,
- "description": "A tiny low-level library that provides platform-specific standard locations of directories for config, cache and other data on Linux, Windows, macOS and Redox by leveraging the mechanisms defined by the XDG base/user directory specifications on Linux, the Known Folder API on Windows, and the Standard Directory guidelines on macOS."
-},
-{
- "name": "dirs-sys-next",
- "version": "0.1.2",
- "authors": "The @xdg-rs members",
- "repository": "https://github.com/xdg-rs/dirs/tree/master/dirs-sys",
- "license": "Apache-2.0 OR MIT",
- "license_file": null,
- "description": "system-level helper functions for the dirs and directories crates"
-},
 {
  "name": "displaydoc",
  "version": "0.2.3",

@@ -1205,6 +1169,15 @@
  "license_file": null,
  "description": "Common code for xml5ever and html5ever"
 },
+{
+ "name": "matchers",
+ "version": "0.1.0",
+ "authors": "Eliza Weisman <eliza@buoyant.io>",
+ "repository": "https://github.com/hawkw/matchers",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Regex matching on character and byte streams."
+},
 {
  "name": "matches",
  "version": "0.1.9",

@@ -1313,6 +1286,15 @@
  "license_file": null,
  "description": "A byte-oriented, zero-copy, parser combinators library"
 },
+{
+ "name": "nu-ansi-term",
+ "version": "0.46.0",
+ "authors": "ogham@bsago.me|Ryan Scheel (Havvy) <ryan.havvy@gmail.com>|Josh Triplett <josh@joshtriplett.org>|The Nushell Project Developers",
+ "repository": "https://github.com/nushell/nu-ansi-term",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Library for ANSI terminal colors and styles (bold, underline)"
+},
 {
  "name": "num-format",
  "version": "0.4.4",

@@ -1439,6 +1421,15 @@
  "license_file": null,
  "description": "FFI bindings to OpenSSL"
 },
+{
+ "name": "overload",
+ "version": "0.1.1",
+ "authors": "Daniel Salvadori <danaugrs@gmail.com>",
+ "repository": "https://github.com/danaugrs/overload",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Provides a macro to simplify operator overloading."
+},
 {
  "name": "parking_lot",
  "version": "0.12.1",

@@ -1754,15 +1745,6 @@
  "license_file": null,
  "description": "A Rust library to access raw Redox system calls"
 },
-{
- "name": "redox_users",
- "version": "0.4.3",
- "authors": "Jose Narvaez <goyox86@gmail.com>|Wesley Hershberger <mggmugginsmc@gmail.com>",
- "repository": "https://gitlab.redox-os.org/redox-os/users",
- "license": "MIT",
- "license_file": null,
- "description": "A Rust library to access Redox users and groups functionality"
-},
 {
  "name": "regex",
  "version": "1.7.0",

@@ -2042,6 +2024,15 @@
  "license_file": null,
  "description": "Pure Rust implementation of the SHA-2 hash function family including SHA-224, SHA-256, SHA-384, and SHA-512."
 },
+{
+ "name": "sharded-slab",
+ "version": "0.1.4",
+ "authors": "Eliza Weisman <eliza@buoyant.io>",
+ "repository": "https://github.com/hawkw/sharded-slab",
+ "license": "MIT",
+ "license_file": null,
+ "description": "A lock-free concurrent slab."
+},
 {
  "name": "signal-hook-registry",
  "version": "1.4.0",

@@ -2069,60 +2060,6 @@
  "license_file": null,
  "description": "Pre-allocated storage for a uniform data type"
 },
-{
- "name": "slog",
- "version": "2.7.0",
- "authors": "Dawid Ciężarkiewicz <dpc@dpc.pw>",
- "repository": "https://github.com/slog-rs/slog",
- "license": "Apache-2.0 OR MIT OR MPL-2.0",
- "license_file": null,
- "description": "Structured, extensible, composable logging for Rust"
-},
-{
- "name": "slog-async",
- "version": "2.7.0",
- "authors": "Dawid Ciężarkiewicz <dpc@dpc.pw>",
- "repository": "https://github.com/slog-rs/async",
- "license": "Apache-2.0 OR MIT OR MPL-2.0",
- "license_file": null,
- "description": "Asynchronous drain for slog-rs"
-},
-{
- "name": "slog-envlogger",
- "version": "2.2.0",
- "authors": "The Rust Project Developers|Dawid Ciężarkiewicz <dpc@dpc.pw>",
- "repository": "https://github.com/slog-rs/envlogger",
- "license": "Apache-2.0 OR MIT",
- "license_file": null,
- "description": "Port of de facto standard logger implementation for Rust, to `slog-rs` framework."
-},
-{
- "name": "slog-scope",
- "version": "4.4.0",
- "authors": "Dawid Ciężarkiewicz <dpc@dpc.pw>",
- "repository": "https://github.com/slog-rs/scope",
- "license": "Apache-2.0 OR MIT OR MPL-2.0",
- "license_file": null,
- "description": "Logging scopes for slog-rs"
-},
-{
- "name": "slog-stdlog",
- "version": "4.1.1",
- "authors": "Dawid Ciężarkiewicz <dpc@dpc.pw>",
- "repository": "https://github.com/slog-rs/stdlog",
- "license": "Apache-2.0 OR MIT OR MPL-2.0",
- "license_file": null,
- "description": "`log` crate adapter for slog-rs"
-},
-{
- "name": "slog-term",
- "version": "2.9.0",
- "authors": "Dawid Ciężarkiewicz <dpc@dpc.pw>",
- "repository": "https://github.com/slog-rs/term",
- "license": "Apache-2.0 OR MIT OR MPL-2.0",
- "license_file": null,
- "description": "Unix terminal drain and formatter for slog-rs"
-},
 {
  "name": "smallvec",
  "version": "1.10.0",

@@ -2231,15 +2168,6 @@
  "license_file": null,
  "description": "Parser for Rust source code"
 },
-{
- "name": "take_mut",
- "version": "0.2.2",
- "authors": "Sgeo <sgeoster@gmail.com>",
- "repository": "https://github.com/Sgeo/take_mut",
- "license": "MIT",
- "license_file": null,
- "description": "Take a T from a &mut T temporarily"
-},
 {
  "name": "tempfile",
  "version": "3.3.0",

@@ -2258,15 +2186,6 @@
  "license_file": null,
  "description": "Compact buffer/string type for zero-copy parsing"
 },
-{
- "name": "term",
- "version": "0.7.0",
- "authors": "The Rust Project Developers|Steven Allen",
- "repository": "https://github.com/Stebalien/term",
- "license": "Apache-2.0 OR MIT",
- "license_file": null,
- "description": "A terminal formatting library"
-},
 {
  "name": "termcolor",
  "version": "1.1.3",

@@ -2447,6 +2366,24 @@
  "license_file": null,
  "description": "Application-level tracing for Rust."
 },
+{
+ "name": "tracing-appender",
+ "version": "0.2.2",
+ "authors": "Zeki Sherif <zekshi@amazon.com>|Tokio Contributors <team@tokio.rs>",
+ "repository": "https://github.com/tokio-rs/tracing",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Provides utilities for file appenders and making non-blocking writers."
+},
+{
+ "name": "tracing-attributes",
+ "version": "0.1.23",
+ "authors": "Tokio Contributors <team@tokio.rs>|Eliza Weisman <eliza@buoyant.io>|David Barsky <dbarsky@amazon.com>",
+ "repository": "https://github.com/tokio-rs/tracing",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Procedural macro attributes for automatically instrumenting functions."
+},
 {
  "name": "tracing-core",
  "version": "0.1.30",

@@ -2456,6 +2393,24 @@
  "license_file": null,
  "description": "Core primitives for application-level tracing."
 },
+{
+ "name": "tracing-log",
+ "version": "0.1.3",
+ "authors": "Tokio Contributors <team@tokio.rs>",
+ "repository": "https://github.com/tokio-rs/tracing",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Provides compatibility between `tracing` and the `log` crate."
+},
+{
+ "name": "tracing-subscriber",
+ "version": "0.3.16",
+ "authors": "Eliza Weisman <eliza@buoyant.io>|David Barsky <me@davidbarsky.com>|Tokio Contributors <team@tokio.rs>",
+ "repository": "https://github.com/tokio-rs/tracing",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Utilities for implementing and composing `tracing` subscribers."
+},
 {
  "name": "try-lock",
  "version": "0.2.3",

@@ -2654,6 +2609,15 @@
  "license_file": null,
  "description": "A missing utime function for Rust."
 },
+{
+ "name": "valuable",
+ "version": "0.1.0",
+ "authors": null,
+ "repository": "https://github.com/tokio-rs/valuable",
+ "license": "MIT",
+ "license_file": null,
+ "description": "Object-safe value inspection, used to pass un-typed structured data across trait-object boundaries."
+},
 {
  "name": "vcpkg",
  "version": "0.2.15",
@@ -34,7 +34,6 @@ message OpenCollectionRequest {
   string collection_path = 1;
   string media_folder_path = 2;
   string media_db_path = 3;
-  string log_path = 4;
 }
 
 message CloseCollectionRequest {
@@ -62,11 +62,14 @@ class RustBackend(RustBackendGenerated):
     public method.
     """
 
+    @staticmethod
+    def initialize_logging(path: str | None = None) -> None:
+        _rsbridge.initialize_logging(path)
+
     def __init__(
         self,
         langs: list[str] | None = None,
         server: bool = False,
-        log_file: str | None = None,
     ) -> None:
         # pick up global defaults if not provided
         if langs is None:

@@ -76,7 +79,7 @@ class RustBackend(RustBackendGenerated):
             preferred_langs=langs,
             server=server,
         )
-        self._backend = _rsbridge.open_backend(init_msg.SerializeToString(), log_file)
+        self._backend = _rsbridge.open_backend(init_msg.SerializeToString())
 
     def db_query(
         self, sql: str, args: Sequence[ValueForDB], first_row_only: bool
@@ -1,5 +1,6 @@
 def buildhash() -> str: ...
-def open_backend(data: bytes, log_file: str | None) -> Backend: ...
+def open_backend(data: bytes) -> Backend: ...
+def initialize_logging(log_file: str | None) -> Backend: ...
 
 class Backend:
     @classmethod
@@ -117,16 +117,19 @@ ExportLimit = Union[DeckIdLimit, NoteIdsLimit, CardIdsLimit, None]
 class Collection(DeprecatedNamesMixin):
     sched: V1Scheduler | V2Scheduler | V3Scheduler
 
+    @staticmethod
+    def initialize_backend_logging(path: str | None = None) -> None:
+        """Enable terminal and optional file-based logging. Must be called only once."""
+        RustBackend.initialize_logging(path)
+
     def __init__(
         self,
         path: str,
         backend: RustBackend | None = None,
         server: bool = False,
         log: bool = False,
     ) -> None:
         self._backend = backend or RustBackend(server=server)
         self.db: DBProxy | None = None
         self._should_log = log
         self.server = server
         self.path = os.path.abspath(path)
         self.reopen()

@@ -300,18 +303,12 @@ class Collection(DeprecatedNamesMixin):
 
         (media_dir, media_db) = media_paths_from_col_path(self.path)
 
-        log_path = ""
-        should_log = not self.server and self._should_log
-        if should_log:
-            log_path = self.path.replace(".anki2", ".log")
-
         # connect
         if not after_full_sync:
             self._backend.open_collection(
                 collection_path=self.path,
                 media_folder_path=media_dir,
                 media_db_path=media_db,
-                log_path=log_path,
             )
         self.db = DBProxy(weakref.proxy(self._backend))
         self.db.begin()
@@ -3,7 +3,7 @@
 
 use anki::{
     backend::{init_backend, Backend as RustBackend},
-    log::default_logger,
+    log::set_global_logger,
 };
 use pyo3::{
     create_exception, exceptions::PyException, prelude::*, types::PyBytes, wrap_pyfunction,

@@ -22,12 +22,13 @@ fn buildhash() -> &'static str {
 }
 
 #[pyfunction]
-fn open_backend(init_msg: &PyBytes, log_file: Option<String>) -> PyResult<Backend> {
-    let log = match default_logger(log_file.as_deref()) {
-        Ok(log) => Some(log),
-        Err(e) => return Err(PyException::new_err(e)),
-    };
-    match init_backend(init_msg.as_bytes(), log) {
+fn initialize_logging(path: Option<&str>) -> PyResult<()> {
+    set_global_logger(path).map_err(|e| PyException::new_err(e.to_string()))
+}
+
+#[pyfunction]
+fn open_backend(init_msg: &PyBytes) -> PyResult<Backend> {
+    match init_backend(init_msg.as_bytes()) {
         Ok(backend) => Ok(Backend { backend }),
         Err(e) => Err(PyException::new_err(e)),
     }

@@ -74,6 +75,7 @@ fn _rsbridge(_py: Python, m: &PyModule) -> PyResult<()> {
     m.add_class::<Backend>()?;
     m.add_wrapped(wrap_pyfunction!(buildhash)).unwrap();
     m.add_wrapped(wrap_pyfunction!(open_backend)).unwrap();
+    m.add_wrapped(wrap_pyfunction!(initialize_logging)).unwrap();
 
     Ok(())
 }
@@ -536,14 +536,19 @@ def _run(argv: Optional[list[str]] = None, exec: bool = True) -> Optional[AnkiApp]:
         print("You can force it on with an env var: ANKI_WAYLAND=1")
         os.environ["QT_QPA_PLATFORM"] = "xcb"
 
-    # default to specified/system language before getting user's preference so that we can localize some more strings
-    lang = anki.lang.get_def_lang(opts.lang)
-    anki.lang.set_lang(lang[1])
-
     # profile manager
+    i18n_setup = False
     pm = None
     try:
-        pm = ProfileManager(opts.base)
+        base_folder = ProfileManager.get_created_base_folder(opts.base)
+        Collection.initialize_backend_logging(str(base_folder / "anki.log"))
+
+        # default to specified/system language before getting user's preference so that we can localize some more strings
+        lang = anki.lang.get_def_lang(opts.lang)
+        anki.lang.set_lang(lang[1])
+        i18n_setup = True
+
+        pm = ProfileManager(base_folder)
         pmLoadResult = pm.setupMeta()
     except:
         # will handle below

@@ -582,11 +587,14 @@ def _run(argv: Optional[list[str]] = None, exec: bool = True) -> Optional[AnkiApp]:
         return None
 
     if not pm:
-        QMessageBox.critical(
-            None,
-            tr.qt_misc_error(),
-            tr.profiles_could_not_create_data_folder(),
-        )
+        if i18n_setup:
+            QMessageBox.critical(
+                None,
+                tr.qt_misc_error(),
+                tr.profiles_could_not_create_data_folder(),
+            )
+        else:
+            QMessageBox.critical(None, "Startup Failed", "Unable to create data folder")
         return None
 
     # disable icons on mac; this must be done before window created
@@ -115,15 +115,14 @@ class LoadMetaResult:
 
 
 class ProfileManager:
-    def __init__(self, base: str | None = None) -> None:  #
+    def __init__(self, base: Path) -> None:  #
+        "base should be retrieved via ProfileMangager.get_created_base_folder"
         ## Settings which should be forgotten each Anki restart
         self.session: dict[str, Any] = {}
         self.name: str | None = None
         self.db: DB | None = None
         self.profile: dict | None = None
-        # instantiate base folder
-        self.base: str
-        self._setBaseFolder(base)
+        self.base = str(base)
 
     def setupMeta(self) -> LoadMetaResult:
         # load metadata

@@ -144,12 +143,6 @@ class ProfileManager:
         except TypeError as exc:
             raise Exception("Provided profile does not exist.") from exc
 
-    # Base creation
-    ######################################################################
-
-    def ensureBaseExists(self) -> None:
-        self._ensureExists(self.base)
-
     # Profile load/save
     ######################################################################
 

@@ -337,16 +330,19 @@ class ProfileManager:
             os.makedirs(path)
         return path
 
-    def _setBaseFolder(self, cmdlineBase: str | None) -> None:
-        if cmdlineBase:
-            self.base = os.path.abspath(cmdlineBase)
-        elif os.environ.get("ANKI_BASE"):
-            self.base = os.path.abspath(os.environ["ANKI_BASE"])
-        else:
-            self.base = self._defaultBase()
-        self.ensureBaseExists()
+    @staticmethod
+    def get_created_base_folder(path_override: str | None) -> Path:
+        "Create the base folder and return it, using provided path or default."
+        path = Path(
+            path_override
+            or os.environ.get("ANKI_BASE")
+            or ProfileManager._default_base()
+        )
+        path.mkdir(parents=True, exist_ok=True)
+        return path
 
-    def _defaultBase(self) -> str:
+    @staticmethod
+    def _default_base() -> str:
         if is_win:
             from aqt.winpaths import get_appdata
 
@@ -87,15 +87,14 @@ serde_json = "1.0.89"
 serde_repr = "0.1.9"
 serde_tuple = "0.5.0"
 sha1 = "0.10.5"
-slog = { version = "2.7.0", features = ["max_level_trace", "release_max_level_debug"] }
-slog-async = "2.7.0"
-slog-envlogger = "2.2.0"
-slog-term = "2.9.0"
 snafu = { version = "0.7.3", features = ["backtraces"] }
 strum = { version = "0.24.1", features = ["derive"] }
 tempfile = "3.3.0"
 tokio = { version = "1.22", features = ["fs", "rt-multi-thread"] }
 tokio-util = { version = "0.7.4", features = ["io"] }
+tracing = { version = "0.1.37", features = ["max_level_trace", "release_max_level_debug"] }
+tracing-appender = "0.2.2"
+tracing-subscriber = { version = "0.3.16", features = ["fmt", "env-filter"] }
 unic-ucd-category = "0.9.0"
 unicode-normalization = "0.1.22"
 utime = "0.3.1"
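One detail worth noting in the dependency hunk above: the `max_level_trace` and `release_max_level_debug` features were carried over from the old slog declaration to the new tracing one. They are compile-time level caps, so in release builds anything above the debug level is compiled out entirely. A small illustrative sketch of the effect (not code from the commit):

use tracing::{debug, trace};

fn main() {
    // With release_max_level_debug, this statement is statically disabled
    // in release builds and costs nothing at runtime there.
    trace!("per-item detail, dev builds only");

    // debug! survives in release builds (the cap is the debug level),
    // though the runtime EnvFilter still decides whether it is emitted.
    debug!("coarser progress information");
}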
@@ -3,7 +3,7 @@
 
 use std::sync::MutexGuard;
 
-use slog::error;
+use tracing::error;
 
 use super::{progress::Progress, Backend};
 pub(super) use crate::pb::collection::collection_service::Service as CollectionService;

@@ -37,11 +37,6 @@ impl CollectionService for Backend {
             .set_media_paths(input.media_folder_path, input.media_db_path)
             .set_server(self.server)
             .set_tr(self.tr.clone());
-        if !input.log_path.is_empty() {
-            builder.set_log_file(&input.log_path)?;
-        } else {
-            builder.set_logger(self.log.clone());
-        }
 
         *guard = Some(builder.build()?);
 

@@ -63,7 +58,7 @@ impl CollectionService for Backend {
         let col_inner = guard.take().unwrap();
 
         if let Err(e) = col_inner.close(desired_version) {
-            error!(self.log, " failed: {:?}", e);
+            error!(" failed: {:?}", e);
         }
 
         Ok(().into())
@@ -47,7 +47,6 @@ impl ImportExportService for Backend {
             Path::new(&input.media_folder),
             Path::new(&input.media_db),
             self.import_progress_fn(),
-            &self.log,
         )
         .map(Into::into)
     }
@@ -37,7 +37,6 @@ use std::{
 use once_cell::sync::OnceCell;
 use progress::AbortHandleSlot;
 use prost::Message;
-use slog::Logger;
 use tokio::runtime::{
     Runtime, {self},
 };

@@ -62,7 +61,7 @@ use self::{
     sync::{SyncService, SyncState},
     tags::TagsService,
 };
-use crate::{backend::dbproxy::db_command_bytes, log, pb, pb::backend::ServiceIndex, prelude::*};
+use crate::{backend::dbproxy::db_command_bytes, pb, pb::backend::ServiceIndex, prelude::*};
 
 pub struct Backend {
     col: Arc<Mutex<Option<Collection>>>,

@@ -71,7 +70,6 @@ pub struct Backend {
     sync_abort: AbortHandleSlot,
     progress_state: Arc<Mutex<ProgressState>>,
     runtime: OnceCell<Runtime>,
-    log: Logger,
     state: Arc<Mutex<BackendState>>,
     backup_task: Arc<Mutex<Option<JoinHandle<Result<()>>>>>,
 }

@@ -81,20 +79,19 @@ struct BackendState {
     sync: SyncState,
 }
 
-pub fn init_backend(init_msg: &[u8], log: Option<Logger>) -> result::Result<Backend, String> {
+pub fn init_backend(init_msg: &[u8]) -> result::Result<Backend, String> {
     let input: pb::backend::BackendInit = match pb::backend::BackendInit::decode(init_msg) {
         Ok(req) => req,
         Err(_) => return Err("couldn't decode init request".into()),
     };
 
     let tr = I18n::new(&input.preferred_langs);
-    let log = log.unwrap_or_else(log::terminal);
 
-    Ok(Backend::new(tr, input.server, log))
+    Ok(Backend::new(tr, input.server))
 }
 
 impl Backend {
-    pub fn new(tr: I18n, server: bool, log: Logger) -> Backend {
+    pub fn new(tr: I18n, server: bool) -> Backend {
         Backend {
             col: Arc::new(Mutex::new(None)),
             tr,

@@ -105,7 +102,6 @@ impl Backend {
                 last_progress: None,
             })),
             runtime: OnceCell::new(),
-            log,
             state: Arc::new(Mutex::new(BackendState::default())),
             backup_task: Arc::new(Mutex::new(None)),
         }
@@ -6,7 +6,7 @@ mod server;
 use std::sync::Arc;
 
 use futures::future::{AbortHandle, AbortRegistration, Abortable};
-use slog::warn;
+use tracing::warn;
 
 use super::{progress::AbortHandleSlot, Backend};
 pub(super) use crate::pb::sync::sync_service::Service as SyncService;

@@ -153,9 +153,7 @@ impl Backend {
             // abort handles by just iterating over them all in
             // abort_sync). But for now, just log a warning if there was
             // already one present -- but don't abort it either.
-            let log = self.with_col(|col| Ok(col.log.clone()))?;
             warn!(
-                log,
                 "new sync_abort handle registered, but old one was still present (old sync job might not be cancelled on abort)"
             );
         }

@@ -184,7 +182,6 @@
         let col = guard.as_mut().unwrap();
         let folder = col.media_folder.clone();
         let db = col.media_db.clone();
-        let log = col.log.clone();
         drop(guard);
 
         // start the sync

@@ -193,7 +190,7 @@
 
         let mgr = MediaManager::new(&folder, &db)?;
         let rt = self.runtime_handle();
-        let sync_fut = mgr.sync_media(progress_fn, input.host_number, &input.hkey, log);
+        let sync_fut = mgr.sync_media(progress_fn, input.host_number, &input.hkey);
         let abortable_sync = Abortable::new(sync_fut, abort_reg);
         let result = rt.block_on(abortable_sync);
 
@@ -13,10 +13,10 @@ use std::{
 
 use chrono::prelude::*;
 use itertools::Itertools;
-use log::error;
+use tracing::error;
 
 use crate::{
-    import_export::package::export_colpkg_from_data, io::read_file, log,
+    import_export::package::export_colpkg_from_data, io::read_file,
     pb::config::preferences::BackupLimits, prelude::*,
 };
 

@@ -37,13 +37,12 @@ impl Collection {
         if should_skip_backup(force, limits.minimum_interval_mins, backup_folder.as_ref())? {
             Ok(None)
         } else {
-            let log = self.log.clone();
             let tr = self.tr.clone();
             self.storage.checkpoint()?;
             let col_data = read_file(&self.col_path)?;
             self.update_last_backup_timestamp()?;
             Ok(Some(thread::spawn(move || {
-                backup_inner(&col_data, &backup_folder, limits, log, &tr)
+                backup_inner(&col_data, &backup_folder, limits, &tr)
             })))
         }
     }

@@ -86,11 +85,10 @@ fn backup_inner<P: AsRef<Path>>(
     col_data: &[u8],
     backup_folder: P,
     limits: BackupLimits,
-    log: Logger,
     tr: &I18n,
 ) -> Result<()> {
     write_backup(col_data, backup_folder.as_ref(), tr)?;
-    thin_backups(backup_folder, limits, &log)
+    thin_backups(backup_folder, limits)
 }
 

@@ -99,17 +97,13 @@ fn write_backup<S: AsRef<OsStr>>(col_data: &[u8], backup_folder: S, tr: &I18n) -> Result<()> {
     export_colpkg_from_data(&out_path, col_data, tr)
 }
 
-fn thin_backups<P: AsRef<Path>>(
-    backup_folder: P,
-    limits: BackupLimits,
-    log: &Logger,
-) -> Result<()> {
+fn thin_backups<P: AsRef<Path>>(backup_folder: P, limits: BackupLimits) -> Result<()> {
     let backups =
         read_dir(backup_folder)?.filter_map(|entry| entry.ok().and_then(Backup::from_entry));
     let obsolete_backups = BackupFilter::new(Local::now(), limits).obsolete_backups(backups);
     for backup in obsolete_backups {
         if let Err(error) = remove_file(&backup.path) {
-            error!(log, "failed to remove {:?}: {error:?}", &backup.path);
+            error!("failed to remove {:?}: {error:?}", &backup.path);
         };
     }
 
@@ -13,7 +13,6 @@ use crate::{
     decks::{Deck, DeckId},
     error::Result,
     i18n::I18n,
-    log::{default_logger, Logger},
     notetype::{Notetype, NotetypeId},
     scheduler::{queue::CardQueues, SchedulerInfo},
     storage::{SchemaVersion, SqliteStorage},

@@ -29,7 +28,6 @@ pub struct CollectionBuilder {
     media_db: Option<PathBuf>,
     server: Option<bool>,
     tr: Option<I18n>,
-    log: Option<Logger>,
 }
 
 impl CollectionBuilder {

@@ -50,7 +48,6 @@ impl CollectionBuilder {
         let server = self.server.unwrap_or_default();
         let media_folder = self.media_folder.clone().unwrap_or_default();
         let media_db = self.media_db.clone().unwrap_or_default();
-        let log = self.log.clone().unwrap_or_else(crate::log::terminal);
 
         let storage = SqliteStorage::open_or_create(&col_path, &tr, server)?;
         let col = Collection {

@@ -59,7 +56,6 @@
             media_folder,
             media_db,
             tr,
-            log,
             server,
             state: CollectionState::default(),
         };

@@ -87,18 +83,6 @@
         self.tr = Some(tr);
         self
     }
-
-    /// Directly set the logger.
-    pub fn set_logger(&mut self, log: Logger) -> &mut Self {
-        self.log = Some(log);
-        self
-    }
-
-    /// Log to the provided file.
-    pub fn set_log_file(&mut self, log_file: &str) -> Result<&mut Self, std::io::Error> {
-        self.set_logger(default_logger(Some(log_file))?);
-        Ok(self)
-    }
 }
 
 #[cfg(test)]

@@ -129,7 +113,6 @@ pub struct Collection {
     pub(crate) media_folder: PathBuf,
     pub(crate) media_db: PathBuf,
     pub(crate) tr: I18n,
-    pub(crate) log: Logger,
     pub(crate) server: bool,
     pub(crate) state: CollectionState,
 }

@@ -140,8 +123,7 @@ impl Collection {
         builder
             .set_media_paths(self.media_folder.clone(), self.media_db.clone())
             .set_server(self.server)
-            .set_tr(self.tr.clone())
-            .set_logger(self.log.clone());
+            .set_tr(self.tr.clone());
         builder
     }
 
@@ -11,8 +11,8 @@ pub(crate) mod undo;
 
 use serde::{de::DeserializeOwned, Serialize};
 use serde_repr::{Deserialize_repr, Serialize_repr};
-use slog::warn;
 use strum::IntoStaticStr;
+use tracing::warn;
 
 pub use self::{
     bool::BoolKey, deck::DeckConfigKey, notetype::get_aux_notetype_config_key,

@@ -109,7 +109,7 @@ impl Collection {
             Ok(Some(val)) => Some(val),
             Ok(None) => None,
             Err(e) => {
-                warn!(self.log, "error accessing config key"; "key"=>key, "err"=>?e);
+                warn!(key, ?e, "error accessing config key");
                 None
             }
         }
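The rewritten `warn!` call above illustrates the mechanical translation applied throughout the commit: slog's trailing `"key" => value` pairs become leading tracing fields, where a bare identifier captures a variable by name and a `?` prefix records it with its `Debug` implementation. A standalone sketch (the function name and values here are made up):

use tracing::warn;

fn report_config_error(key: &str, e: &std::io::Error) {
    // slog form:    warn!(log, "error accessing config key"; "key"=>key, "err"=>?e);
    // tracing form: fields first, message last; `?e` means Debug-format it.
    warn!(key, ?e, "error accessing config key");
}

fn main() {
    let e = std::io::Error::new(std::io::ErrorKind::Other, "bad json");
    report_config_error("sortType", &e);
}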
@@ -4,7 +4,7 @@
 use std::{collections::HashSet, sync::Arc};
 
 use itertools::Itertools;
-use slog::debug;
+use tracing::debug;
 
 use crate::{
     collection::Collection,

@@ -89,9 +89,9 @@
         F: FnMut(DatabaseCheckProgress, bool),
     {
         progress_fn(DatabaseCheckProgress::Integrity, false);
-        debug!(self.log, "quick check");
+        debug!("quick check");
         if self.storage.quick_check_corrupt() {
-            debug!(self.log, "quick check failed");
+            debug!("quick check failed");
             return Err(AnkiError::db_error(
                 self.tr.database_check_corrupt(),
                 DbErrorKind::Corrupt,

@@ -99,7 +99,7 @@
         }
 
         progress_fn(DatabaseCheckProgress::Optimize, false);
-        debug!(self.log, "optimize");
+        debug!("optimize");
         self.storage.optimize()?;
 
         self.transact_no_undo(|col| col.check_database_inner(progress_fn))

@@ -113,28 +113,28 @@
 
         // cards first, as we need to be able to read them to process notes
         progress_fn(DatabaseCheckProgress::Cards, false);
-        debug!(self.log, "check cards");
+        debug!("check cards");
         self.check_card_properties(&mut out)?;
         self.check_orphaned_cards(&mut out)?;
 
-        debug!(self.log, "check decks");
+        debug!("check decks");
         self.check_missing_deck_ids(&mut out)?;
         self.check_filtered_cards(&mut out)?;
 
-        debug!(self.log, "check notetypes");
+        debug!("check notetypes");
         self.check_notetypes(&mut out, &mut progress_fn)?;
 
         progress_fn(DatabaseCheckProgress::History, false);
 
-        debug!(self.log, "check review log");
+        debug!("check review log");
         self.check_revlog(&mut out)?;
 
-        debug!(self.log, "missing decks");
+        debug!("missing decks");
         self.check_missing_deck_names(&mut out)?;
 
         self.update_next_new_position()?;
 
-        debug!(self.log, "db check finished: {:#?}", out);
+        debug!("db check finished: {:#?}", out);
 
         Ok(out)
     }

@@ -216,7 +216,7 @@
         let mut checked_notes = 0;
 
         for (ntid, group) in &nids_by_notetype.into_iter().group_by(|tup| tup.0) {
-            debug!(self.log, "check notetype: {}", ntid);
+            debug!("check notetype: {}", ntid);
             let mut group = group.peekable();
             let nt = match self.get_notetype(ntid)? {
                 None => {

@@ -363,7 +363,7 @@
         field_count: usize,
         previous_id: NotetypeId,
     ) -> Result<Arc<Notetype>> {
-        debug!(self.log, "create recovery notetype");
+        debug!("create recovery notetype");
         let extra_cards_required = self
             .storage
             .highest_card_ordinal_for_notetype(previous_id)?;
@@ -38,9 +38,7 @@ impl Context<'_> {
         }
 
         let db_progress_fn = self.progress.media_db_fn(ImportProgress::MediaCheck)?;
-        let existing_sha1s = self
-            .media_manager
-            .all_checksums(db_progress_fn, &self.target_col.log)?;
+        let existing_sha1s = self.media_manager.all_checksums(db_progress_fn)?;
 
         prepare_media(
             media_entries,
@@ -36,7 +36,6 @@ pub fn import_colpkg(
     target_media_folder: &Path,
     media_db: &Path,
     progress_fn: impl 'static + FnMut(ImportProgress, bool) -> bool,
-    log: &Logger,
 ) -> Result<()> {
     let mut progress = IncrementableProgress::new(progress_fn);
     progress.call(ImportProgress::File)?;

@@ -58,7 +57,6 @@
         &mut archive,
         target_media_folder,
         media_db,
-        log,
     )?;
 
     atomic_rename(tempfile, &col_path, true)?;

@@ -90,7 +88,6 @@ fn restore_media(
     archive: &mut ZipArchive<File>,
     media_folder: &Path,
     media_db: &Path,
-    log: &Logger,
 ) -> Result<()> {
     let media_entries = extract_media_entries(meta, archive)?;
     if media_entries.is_empty() {

@@ -99,7 +96,7 @@
 
     create_dir_all(media_folder)?;
     let media_manager = MediaManager::new(media_folder, media_db)?;
-    let mut media_comparer = MediaComparer::new(meta, progress, &media_manager, log)?;
+    let mut media_comparer = MediaComparer::new(meta, progress, &media_manager)?;
 
     let mut incrementor = progress.incrementor(ImportProgress::Media);
     for mut entry in media_entries {

@@ -171,13 +168,12 @@ impl<'a> MediaComparer<'a> {
         meta: &Meta,
         progress: &mut IncrementableProgress<ImportProgress>,
         media_manager: &'a MediaManager,
-        log: &Logger,
     ) -> Result<Self> {
         Ok(Self(if meta.media_list_is_hashmap() {
             None
         } else {
             let mut db_progress_fn = progress.media_db_fn(ImportProgress::MediaCheck)?;
-            media_manager.register_changes(&mut db_progress_fn, log)?;
+            media_manager.register_changes(&mut db_progress_fn)?;
             Some(Box::new(media_manager.checksum_getter()))
         }))
     }
@@ -11,7 +11,6 @@ use crate::{
     collection::CollectionBuilder,
     import_export::package::import_colpkg,
     io::{create_dir, create_dir_all, read_file},
-    log::terminal,
     media::MediaManager,
     prelude::*,
 };

@@ -62,7 +61,6 @@ fn roundtrip() -> Result<()> {
         &import_media_dir,
         &import_media_db,
         |_, _| true,
-        &terminal(),
     )?;
 
     // confirm collection imported
@@ -3,52 +3,42 @@
 
 use std::{fs, fs::OpenOptions, io};
 
-pub use slog::{debug, error, Logger};
-use slog::{slog_o, Drain};
-use slog_async::OverflowStrategy;
+use once_cell::sync::OnceCell;
+use tracing::subscriber::set_global_default;
+use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
+use tracing_subscriber::{fmt, fmt::Layer, layer::SubscriberExt, EnvFilter};
 
 use crate::prelude::*;
 
 const LOG_ROTATE_BYTES: u64 = 50 * 1024 * 1024;
 
-pub(crate) fn terminal() -> Logger {
-    let decorator = slog_term::TermDecorator::new().build();
-    let drain = slog_term::FullFormat::new(decorator).build().fuse();
-    let drain = slog_envlogger::new(drain);
-    let drain = slog_async::Async::new(drain)
-        .chan_size(1_024)
-        .overflow_strategy(OverflowStrategy::Block)
-        .build()
-        .fuse();
-    Logger::root(drain, slog_o!())
+/// Enable logging to the console, and optionally also to a file.
+pub fn set_global_logger(path: Option<&str>) -> Result<()> {
+    let file_writer = if let Some(path) = path {
+        Some(Layer::new().with_writer(get_appender(path)?))
+    } else {
+        None
+    };
+    let subscriber = tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(file_writer)
+        .with(EnvFilter::from_default_env());
+    set_global_default(subscriber).or_invalid("global subscriber already set")?;
+    Ok(())
 }
 
-fn file(path: &str) -> io::Result<Logger> {
+/// Holding on to this guard does not actually ensure the log file will be fully written,
+/// as statics do not implement Drop.
+static APPENDER_GUARD: OnceCell<WorkerGuard> = OnceCell::new();
+
+fn get_appender(path: &str) -> Result<NonBlocking> {
     maybe_rotate_log(path)?;
     let file = OpenOptions::new().create(true).append(true).open(path)?;
-
-    let decorator = slog_term::PlainSyncDecorator::new(file);
-    let drain = slog_term::FullFormat::new(decorator).build().fuse();
-    let drain = slog_envlogger::new(drain);
-
-    if std::env::var("LOGTERM").is_ok() {
-        // log to the terminal as well
-        let decorator = slog_term::TermDecorator::new().build();
-        let term_drain = slog_term::FullFormat::new(decorator).build().fuse();
-        let term_drain = slog_envlogger::new(term_drain);
-        let joined_drain = slog::Duplicate::new(drain, term_drain).fuse();
-        let drain = slog_async::Async::new(joined_drain)
-            .chan_size(1_024)
-            .overflow_strategy(OverflowStrategy::Block)
-            .build()
-            .fuse();
-        Ok(Logger::root(drain, slog_o!()))
-    } else {
-        let drain = slog_async::Async::new(drain)
-            .chan_size(1_024)
-            .overflow_strategy(OverflowStrategy::Block)
-            .build()
-            .fuse();
-        Ok(Logger::root(drain, slog_o!()))
+    let (appender, guard) = tracing_appender::non_blocking(file);
+    if APPENDER_GUARD.set(guard).is_err() {
+        invalid_input!("log file should be set only once");
     }
+    Ok(appender)
 }
 
 fn maybe_rotate_log(path: &str) -> io::Result<()> {

@@ -79,11 +69,3 @@ fn maybe_rotate_log(path: &str) -> io::Result<()> {
     // and rotate the primary log
     fs::rename(path, path2)
 }
-
-/// Get a logger, logging to a file if a path was provided, otherwise terminal.
-pub fn default_logger(path: Option<&str>) -> io::Result<Logger> {
-    Ok(match path {
-        Some(path) => file(path)?,
-        None => terminal(),
-    })
-}
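The new `set_global_logger` above composes a console layer, an optional non-blocking file layer, and an `EnvFilter` driven by `RUST_LOG` (a variable slog-envlogger also honoured). A standalone sketch of the same layering, assuming `tracing`, `tracing-appender`, and `tracing-subscriber` (with the `env-filter` feature) as dependencies; the log file name is illustrative:

use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};

fn main() {
    let file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open("example.log")
        .expect("open log file");
    // The worker guard must stay alive for buffered lines to be flushed.
    let (file_writer, _guard) = tracing_appender::non_blocking(file);

    tracing_subscriber::registry()
        .with(fmt::layer()) // console output
        .with(fmt::layer().with_writer(file_writer)) // file output
        .with(EnvFilter::from_default_env()) // e.g. RUST_LOG=anki=debug
        .init();

    tracing::error!("logged to both the console and example.log");
}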
@@ -3,9 +3,10 @@
 
 use std::{collections::HashMap, path::Path, time};
 
+use tracing::debug;
+
 use crate::{
     io::read_dir_files,
-    log::{debug, Logger},
     media::{
         database::{MediaDatabaseContext, MediaEntry},
         files::{

@@ -30,23 +31,17 @@
     media_folder: &'a Path,
     progress_cb: F,
     checked: usize,
-    log: &'a Logger,
 }
 
 impl<F> ChangeTracker<'_, F>
 where
     F: FnMut(usize) -> bool,
 {
-    pub(super) fn new<'a>(
-        media_folder: &'a Path,
-        progress: F,
-        log: &'a Logger,
-    ) -> ChangeTracker<'a, F> {
+    pub(super) fn new(media_folder: &Path, progress: F) -> ChangeTracker<'_, F> {
         ChangeTracker {
             media_folder,
             progress_cb: progress,
             checked: 0,
-            log,
         }
     }
 

@@ -64,9 +59,13 @@
         let dirmod = mtime_as_i64(self.media_folder)?;
 
         let mut meta = ctx.get_meta()?;
-        debug!(self.log, "begin change check"; "folder_mod" => dirmod, "db_mod" => meta.folder_mtime);
+        debug!(
+            folder_mod = dirmod,
+            db_mod = meta.folder_mtime,
+            "begin change check"
+        );
         if dirmod == meta.folder_mtime {
-            debug!(self.log, "skip check");
+            debug!("skip check");
             return Ok(());
         } else {
             meta.folder_mtime = dirmod;

@@ -114,7 +113,7 @@
                 Some(fname) => fname,
                 None => {
                     // not normalized; skip it
-                    debug!(self.log, "ignore non-normalized"; "fname"=>disk_fname);
+                    debug!(fname = disk_fname, "ignore non-normalized");
                     continue;
                 }
             };

@@ -144,7 +143,7 @@
                 .as_secs() as i64;
             if let Some(previous_mtime) = previous_mtime {
                 if previous_mtime == mtime {
-                    debug!(self.log, "mtime unchanged"; "fname"=>fname.as_ref());
+                    debug!(fname = fname.as_ref(), "mtime unchanged");
                     continue;
                 }
             }

@@ -158,10 +157,12 @@
                 mtime,
                 is_new: previous_mtime.is_none(),
             });
-            debug!(self.log, "added or changed";
-                "fname"=>fname.as_ref(),
-                "mtime"=>mtime,
-                "sha1"=>sha1.as_ref().map(|s| hex::encode(&s[0..4])));
+            debug!(
+                fname = fname.as_ref(),
+                mtime,
+                sha1 = sha1.as_ref().map(|s| hex::encode(&s[0..4])),
+                "added or changed"
+            );
 
             self.checked += 1;
             if self.checked % 10 == 0 {

@@ -172,7 +173,7 @@
         // any remaining entries from the database have been deleted
         let removed: Vec<_> = mtimes.into_iter().map(|(k, _)| k).collect();
         for f in &removed {
-            debug!(self.log, "db entry missing on disk"; "fname"=>f);
+            debug!(fname = f, "db entry missing on disk");
         }
 
         Ok((added_or_changed, removed))

@@ -284,9 +285,7 @@ mod test {
 
         let progress_cb = |_n| true;
 
-        let log = crate::log::terminal();
-
-        ChangeTracker::new(&mgr.media_folder, progress_cb, &log).register_changes(&mut ctx)?;
+        ChangeTracker::new(&mgr.media_folder, progress_cb).register_changes(&mut ctx)?;
 
         let mut entry = ctx.transact(|ctx| {
             assert_eq!(ctx.count()?, 1);

@@ -321,7 +320,7 @@
             Ok(entry)
         })?;
 
-        ChangeTracker::new(&mgr.media_folder, progress_cb, &log).register_changes(&mut ctx)?;
+        ChangeTracker::new(&mgr.media_folder, progress_cb).register_changes(&mut ctx)?;
 
         ctx.transact(|ctx| {
             assert_eq!(ctx.count()?, 1);

@@ -354,7 +353,7 @@
 
         change_mtime(&media_dir);
 
-        ChangeTracker::new(&mgr.media_folder, progress_cb, &log).register_changes(&mut ctx)?;
+        ChangeTracker::new(&mgr.media_folder, progress_cb).register_changes(&mut ctx)?;
 
         assert_eq!(ctx.count()?, 0);
         assert!(!ctx.get_pending_uploads(1)?.is_empty());
@@ -9,12 +9,12 @@ use std::{
 };
 
 use anki_i18n::without_unicode_isolation;
+use tracing::debug;
 
 use crate::{
     collection::Collection,
     error::{AnkiError, DbErrorKind, Result},
     latex::extract_latex_expanding_clozes,
-    log::debug,
     media::{
         database::MediaDatabaseContext,
         files::{

@@ -250,7 +250,7 @@
     ) -> Result<Cow<'a, str>> {
         // add a copy of the file using the correct name
         let fname = self.mgr.add_file(ctx, disk_fname, &data)?;
-        debug!(self.ctx.log, "renamed"; "from"=>disk_fname, "to"=>&fname.as_ref());
+        debug!(from = disk_fname, to = &fname.as_ref(), "renamed");
         assert_ne!(fname.as_ref(), disk_fname);
 
         // remove the original file

@@ -340,7 +340,7 @@
                 self.mgr
                     .add_file(&mut self.mgr.dbctx(), fname.as_ref(), &data)?;
             } else {
-                debug!(self.ctx.log, "file disappeared while restoring trash"; "fname"=>fname.as_ref());
+                debug!(?fname, "file disappeared while restoring trash");
             }
             fs::remove_file(dentry.path())?;
         }
@@ -12,6 +12,7 @@ use std::{
 use lazy_static::lazy_static;
 use regex::Regex;
 use sha1::{Digest, Sha1};
+use tracing::debug;
 use unic_ucd_category::GeneralCategory;
 use unicode_normalization::{is_nfc, UnicodeNormalization};
 

@@ -383,7 +384,6 @@ pub(super) fn add_file_from_ankiweb(
     media_folder: &Path,
     fname: &str,
     data: &[u8],
-    log: &Logger,
 ) -> Result<AddedFile> {
     let sha1 = sha1_of_data(data);
     let normalized = normalize_filename(fname);

@@ -391,13 +391,17 @@
     // if the filename is already valid, we can write the file directly
     let (renamed_from, path) = if let Cow::Borrowed(_) = normalized {
         let path = media_folder.join(normalized.as_ref());
-        debug!(log, "write"; "fname" => normalized.as_ref());
+        debug!(fname = normalized.as_ref(), "write");
         write_file(&path, data)?;
         (None, path)
     } else {
         // ankiweb sent us a non-normalized filename, so we'll rename it
        let new_name = add_data_to_folder_uniquely(media_folder, fname, data, sha1)?;
-        debug!(log, "non-normalized filename received"; "fname"=>&fname, "rename_to"=>new_name.as_ref());
+        debug!(
+            fname,
+            rename_to = new_name.as_ref(),
+            "non-normalized filename received"
+        );
         (
             Some(fname.to_string()),
             media_folder.join(new_name.as_ref()),
@@ -8,7 +8,6 @@ use std::{
 };
 
 use rusqlite::Connection;
-use slog::Logger;
 
 use self::changetracker::ChangeTracker;
 use crate::{
@@ -150,12 +149,11 @@ impl MediaManager {
         progress: F,
         host_number: u32,
         hkey: &'a str,
-        log: Logger,
     ) -> Result<()>
     where
         F: FnMut(MediaSyncProgress) -> bool,
     {
-        let mut syncer = MediaSyncer::new(self, progress, host_number, log);
+        let mut syncer = MediaSyncer::new(self, progress, host_number);
         syncer.sync(hkey).await
     }
 
@@ -166,10 +164,9 @@ impl MediaManager {
     pub fn all_checksums(
         &self,
         progress: impl FnMut(usize) -> bool,
-        log: &Logger,
     ) -> Result<HashMap<String, Sha1Hash>> {
         let mut dbctx = self.dbctx();
-        ChangeTracker::new(&self.media_folder, progress, log).register_changes(&mut dbctx)?;
+        ChangeTracker::new(&self.media_folder, progress).register_changes(&mut dbctx)?;
         dbctx.all_checksums()
     }
 
@@ -182,12 +179,8 @@ impl MediaManager {
         }
     }
 
-    pub fn register_changes(
-        &self,
-        progress: &mut impl FnMut(usize) -> bool,
-        log: &Logger,
-    ) -> Result<()> {
-        ChangeTracker::new(&self.media_folder, progress, log).register_changes(&mut self.dbctx())
+    pub fn register_changes(&self, progress: &mut impl FnMut(usize) -> bool) -> Result<()> {
+        ChangeTracker::new(&self.media_folder, progress).register_changes(&mut self.dbctx())
     }
 }
 

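With the `Logger` fields and parameters stripped from `MediaManager`, every `debug!` in these files now relies on a subscriber being registered once at startup. A minimal sketch of such an initialization, assuming the tracing-subscriber crate with its env-filter feature enabled — the commit's actual setup code is not part of these hunks:

    use tracing_subscriber::EnvFilter;

    fn init_logging() {
        // Honour RUST_LOG when set, otherwise log errors only; every
        // debug!/info!/error! in the process then flows through this subscriber.
        tracing_subscriber::fmt()
            .with_env_filter(
                EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("error")),
            )
            .init();
    }
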
@@ -14,8 +14,8 @@ use bytes::Bytes;
 use reqwest::{multipart, Client, Response};
 use serde_derive::{Deserialize, Serialize};
 use serde_tuple::Serialize_tuple;
-use slog::{debug, Logger};
 use time::Duration;
+use tracing::debug;
 use version::sync_client_version;
 
 use crate::{
@@ -56,7 +56,6 @@ where
     progress_cb: P,
     progress: MediaSyncProgress,
     endpoint: String,
-    log: Logger,
 }
 
 #[derive(Debug, Deserialize)]
@@ -159,12 +158,7 @@ impl<P> MediaSyncer<'_, P>
 where
     P: FnMut(MediaSyncProgress) -> bool,
 {
-    pub fn new(
-        mgr: &MediaManager,
-        progress_cb: P,
-        host_number: u32,
-        log: Logger,
-    ) -> MediaSyncer<'_, P> {
+    pub fn new(mgr: &MediaManager, progress_cb: P, host_number: u32) -> MediaSyncer<'_, P> {
         let timeouts = Timeouts::new();
         let client = Client::builder()
             .connect_timeout(Duration::from_secs(timeouts.connect_secs))
@@ -182,7 +176,6 @@ where
             progress_cb,
             progress: Default::default(),
             endpoint,
-            log,
         }
     }
 
@@ -192,7 +185,7 @@ where
 
     pub async fn sync(&mut self, hkey: &str) -> Result<()> {
         self.sync_inner(hkey).await.map_err(|e| {
-            debug!(self.log, "sync error: {:?}", e);
+            debug!("sync error: {:?}", e);
             e
         })
     }
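Not every call becomes structured: where the slog call was already printf-style, as in `sync` just above, the rewrite simply drops the handle, since tracing's macros accept the same format-args syntax. Fields and a formatted message can also be mixed, as this illustrative snippet shows (the values are made up):

    use tracing::debug;

    fn main() {
        let e = std::io::Error::from(std::io::ErrorKind::TimedOut);

        // message-only form, unchanged apart from the removed logger handle
        debug!("sync error: {:?}", e);

        // fields may still precede a formatted message
        let host_number = 1;
        debug!(host_number, "sync error: {:?}", e);
    }
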
@@ -204,19 +197,16 @@ where
         let meta = self.ctx.get_meta()?;
         let client_usn = meta.last_sync_usn;
 
-        debug!(self.log, "begin media sync");
+        debug!("begin media sync");
         let (sync_key, server_usn) = self.sync_begin(hkey).await?;
         self.skey = Some(sync_key);
-        debug!(self.log, "server usn was {}", server_usn);
+        debug!("server usn was {}", server_usn);
 
         let mut actions_performed = false;
 
         // need to fetch changes from server?
         if client_usn != server_usn {
-            debug!(
-                self.log,
-                "differs from local usn {}, fetching changes", client_usn
-            );
+            debug!("differs from local usn {}, fetching changes", client_usn);
             self.fetch_changes(meta).await?;
             actions_performed = true;
         }
@@ -234,7 +224,7 @@ where
 
         self.fire_progress_cb()?;
 
-        debug!(self.log, "media sync complete");
+        debug!("media sync complete");
 
         Ok(())
     }
@@ -250,7 +240,7 @@ where
             (progress_cb)(*progress)
         };
 
-        ChangeTracker::new(self.mgr.media_folder.as_path(), progress, &self.log)
+        ChangeTracker::new(self.mgr.media_folder.as_path(), progress)
             .register_changes(&mut self.ctx)
     }
 
@@ -277,15 +267,11 @@ where
     async fn fetch_changes(&mut self, mut meta: MediaDatabaseMetadata) -> Result<()> {
         let mut last_usn = meta.last_sync_usn;
         loop {
-            debug!(
-                self.log,
-                "fetching record batch";
-                "start_usn"=>last_usn
-            );
+            debug!(start_usn = last_usn, "fetching record batch");
 
             let batch = self.fetch_record_batch(last_usn).await?;
             if batch.is_empty() {
-                debug!(self.log, "empty batch, done");
+                debug!("empty batch, done");
                 break;
             }
             last_usn = batch.last().unwrap().usn;
@@ -294,7 +280,7 @@ where
             self.fire_progress_cb()?;
 
             let (to_download, to_delete, to_remove_pending) =
-                determine_required_changes(&mut self.ctx, &batch, &self.log)?;
+                determine_required_changes(&mut self.ctx, &batch)?;
 
             // file removal
             self.mgr.remove_files(&mut self.ctx, to_delete.as_slice())?;
@@ -311,12 +297,9 @@ where
                     .map(ToOwned::to_owned)
                     .collect();
                 let zip_data = self.fetch_zip(batch.as_slice()).await?;
-                let download_batch = extract_into_media_folder(
-                    self.mgr.media_folder.as_path(),
-                    zip_data,
-                    &self.log,
-                )?
-                .into_iter();
+                let download_batch =
+                    extract_into_media_folder(self.mgr.media_folder.as_path(), zip_data)?
+                        .into_iter();
                 let len = download_batch.len();
                 dl_fnames = &dl_fnames[len..];
                 downloaded.extend(download_batch);
@@ -327,11 +310,10 @@ where
 
             // then update the DB
             let dirmod = mtime_as_i64(&self.mgr.media_folder)?;
-            let log = &self.log;
             self.ctx.transact(|ctx| {
-                record_clean(ctx, &to_remove_pending, log)?;
-                record_removals(ctx, &to_delete, log)?;
-                record_additions(ctx, downloaded, log)?;
+                record_clean(ctx, &to_remove_pending)?;
+                record_removals(ctx, &to_delete)?;
+                record_additions(ctx, downloaded)?;
 
                 // update usn
                 meta.last_sync_usn = last_usn;
@@ -351,7 +333,7 @@ where
                 break;
             }
 
-            let zip_data = zip_files(&mut self.ctx, &self.mgr.media_folder, &pending, &self.log)?;
+            let zip_data = zip_files(&mut self.ctx, &self.mgr.media_folder, &pending)?;
             if zip_data.is_none() {
                 self.progress.checked += pending.len();
                 self.fire_progress_cb()?;
@@ -377,16 +359,14 @@ where
             .map(|e| &e.fname)
             .collect();
         let fname_cnt = fnames.len() as i32;
-        let log = &self.log;
         self.ctx.transact(|ctx| {
-            record_clean(ctx, fnames.as_slice(), log)?;
+            record_clean(ctx, fnames.as_slice())?;
             let mut meta = ctx.get_meta()?;
             if meta.last_sync_usn + fname_cnt == reply.current_usn {
                 meta.last_sync_usn = reply.current_usn;
                 ctx.set_meta(&meta)?;
             } else {
                 debug!(
-                    log,
                     "server usn {} is not {}, skipping usn update",
                     reply.current_usn,
                     meta.last_sync_usn + fname_cnt
@@ -444,7 +424,7 @@ where
     async fn fetch_zip(&self, files: &[&String]) -> Result<Bytes> {
         let url = format!("{}downloadFiles", self.endpoint);
 
-        debug!(self.log, "requesting files: {:?}", files);
+        debug!("requesting files: {:?}", files);
 
         let req = ZipRequest { files };
         let resp = ankiweb_json_request(&self.client, &url, &req, self.skey(), true).await?;
@@ -503,7 +483,6 @@ fn determine_required_change(
 fn determine_required_changes<'a>(
     ctx: &mut MediaDatabaseContext,
     records: &'a [ServerMediaRecord],
-    log: &Logger,
 ) -> Result<(Vec<&'a String>, Vec<&'a String>, Vec<&'a String>)> {
     let mut to_download = vec![];
     let mut to_delete = vec![];
@@ -527,13 +506,12 @@ fn determine_required_changes<'a>(
 
         let req_change = determine_required_change(&local_sha1, &remote.sha1, local_state);
         debug!(
-            log,
-            "determine action";
-            "fname" => &remote.fname,
-            "lsha" => local_sha1.chars().take(8).collect::<String>(),
-            "rsha" => remote.sha1.chars().take(8).collect::<String>(),
-            "state" => ?local_state,
-            "action" => ?req_change
+            fname = &remote.fname,
+            lsha = local_sha1.chars().take(8).collect::<String>(),
+            rsha = remote.sha1.chars().take(8).collect::<String>(),
+            state = ?local_state,
+            action = ?req_change,
+            "determine action"
         );
         match req_change {
             RequiredChange::Download => to_download.push(&remote.fname),
@@ -594,11 +572,7 @@ async fn ankiweb_request(
     req.send().await?.error_for_status().map_err(Into::into)
 }
 
-fn extract_into_media_folder(
-    media_folder: &Path,
-    zip: Bytes,
-    log: &Logger,
-) -> Result<Vec<AddedFile>> {
+fn extract_into_media_folder(media_folder: &Path, zip: Bytes) -> Result<Vec<AddedFile>> {
     let reader = io::Cursor::new(zip);
     let mut zip = zip::ZipArchive::new(reader)?;
 
@@ -620,7 +594,7 @@ fn extract_into_media_folder(
         let mut data = Vec::with_capacity(file.size() as usize);
         file.read_to_end(&mut data)?;
 
-        let added = add_file_from_ankiweb(media_folder, real_name, &data, log)?;
+        let added = add_file_from_ankiweb(media_folder, real_name, &data)?;
 
         output.push(added);
     }
@@ -628,29 +602,21 @@ fn extract_into_media_folder(
     Ok(output)
 }
 
-fn record_removals(
-    ctx: &mut MediaDatabaseContext,
-    removals: &[&String],
-    log: &Logger,
-) -> Result<()> {
+fn record_removals(ctx: &mut MediaDatabaseContext, removals: &[&String]) -> Result<()> {
     for &fname in removals {
-        debug!(log, "mark removed"; "fname" => fname);
+        debug!(fname, "mark removed");
         ctx.remove_entry(fname)?;
     }
 
     Ok(())
 }
 
-fn record_additions(
-    ctx: &mut MediaDatabaseContext,
-    additions: Vec<AddedFile>,
-    log: &Logger,
-) -> Result<()> {
+fn record_additions(ctx: &mut MediaDatabaseContext, additions: Vec<AddedFile>) -> Result<()> {
     for file in additions {
         if let Some(renamed) = file.renamed_from {
             // the file AnkiWeb sent us wasn't normalized, so we need to record
             // the old file name as a deletion
-            debug!(log, "marking non-normalized file as deleted: {}", renamed);
+            debug!("marking non-normalized file as deleted: {}", renamed);
             let mut entry = MediaEntry {
                 fname: renamed,
                 sha1: None,
@@ -659,10 +625,7 @@ fn record_additions(
             };
             ctx.set_entry(&entry)?;
             // and upload the new filename to ankiweb
-            debug!(
-                log,
-                "marking renamed file as needing upload: {}", file.fname
-            );
+            debug!("marking renamed file as needing upload: {}", file.fname);
             entry = MediaEntry {
                 fname: file.fname.to_string(),
                 sha1: Some(file.sha1),
@@ -678,9 +641,10 @@ fn record_additions(
                 mtime: file.mtime,
                 sync_required: false,
             };
-            debug!(log, "mark added";
-                "fname" => &entry.fname,
-                "sha1" => hex::encode(&entry.sha1.as_ref().unwrap()[0..4])
+            debug!(
+                fname = &entry.fname,
+                sha1 = hex::encode(&entry.sha1.as_ref().unwrap()[0..4]),
+                "mark added"
             );
             ctx.set_entry(&entry)?;
         }
@@ -689,12 +653,12 @@ fn record_additions(
     Ok(())
 }
 
-fn record_clean(ctx: &mut MediaDatabaseContext, clean: &[&String], log: &Logger) -> Result<()> {
+fn record_clean(ctx: &mut MediaDatabaseContext, clean: &[&String]) -> Result<()> {
     for &fname in clean {
         if let Some(mut entry) = ctx.get_entry(fname)? {
             if entry.sync_required {
                 entry.sync_required = false;
-                debug!(log, "mark clean"; "fname"=>&entry.fname);
+                debug!(fname = &entry.fname, "mark clean");
                 ctx.set_entry(&entry)?;
             }
         }
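`record_removals` and `record_clean` above also use tracing's capture shorthands: a bare identifier becomes a field of the same name, a dotted path such as `remote.server_message` works the same way, and a leading `?` records the value through its `Debug` impl. All three forms appear in this commit; a compact illustration (the values are placeholders):

    use tracing::debug;

    fn main() {
        let fname = "a.jpg";
        let delta = 301;
        let state = vec![1, 2, 3]; // any type implementing Debug

        debug!(fname, "mark removed"); // bare identifier: field named `fname`
        debug!(delta, "clock off"); // numeric values work the same way
        debug!(?state, "fetched"); // `?` captures via the Debug impl
    }
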
@@ -707,7 +671,6 @@ fn zip_files<'a>(
     ctx: &mut MediaDatabaseContext,
     media_folder: &Path,
     files: &'a [MediaEntry],
-    log: &Logger,
 ) -> Result<Option<Vec<u8>>> {
     let buf = vec![];
     let mut invalid_entries = vec![];
@@ -731,7 +694,7 @@ fn zip_files<'a>(
         use unicode_normalization::is_nfc;
         if !is_nfc(&file.fname) {
             // older Anki versions stored non-normalized filenames in the DB; clean them up
-            debug!(log, "clean up non-nfc entry"; "fname"=>&file.fname);
+            debug!(fname = file.fname, "clean up non-nfc entry");
             invalid_entries.push(&file.fname);
             continue;
         }
@@ -741,7 +704,7 @@ fn zip_files<'a>(
         match data_for_file(media_folder, &file.fname) {
             Ok(data) => data,
             Err(e) => {
-                debug!(log, "error accessing {}: {}", &file.fname, e);
+                debug!("error accessing {}: {}", &file.fname, e);
                 invalid_entries.push(&file.fname);
                 continue;
             }
@@ -754,7 +717,7 @@ fn zip_files<'a>(
         if let Some(data) = &file_data {
             let normalized = normalize_filename(&file.fname);
             if let Cow::Owned(o) = normalized {
-                debug!(log, "media check required: {} should be {}", &file.fname, o);
+                debug!("media check required: {} should be {}", &file.fname, o);
                 invalid_entries.push(&file.fname);
                 continue;
             }
@@ -773,14 +736,13 @@ fn zip_files<'a>(
         }
 
         debug!(
-            log,
-            "will upload";
-            "fname"=>&file.fname,
-            "kind"=>if file_data.is_some() {
+            fname = &file.fname,
+            kind = if file_data.is_some() {
                 "addition "
             } else {
                 "removal"
-            }
+            },
+            "will upload"
         );
 
         entries.push(UploadEntry {
@@ -840,10 +802,8 @@ mod test {
             true
         };
 
-        let log = crate::log::terminal();
-
         let mgr = MediaManager::new(&media_dir, &media_db)?;
-        mgr.sync_media(progress, 0, hkey, log).await?;
+        mgr.sync_media(progress, 0, hkey).await?;
 
         Ok(())
     }

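The media sync test above no longer builds a terminal logger. When output is wanted in a test, a per-test subscriber can be installed instead; a hedged sketch using tracing-subscriber's test writer (this is not code from the commit):

    #[test]
    fn logs_in_tests() {
        // Route output through libtest's capture; try_init tolerates another
        // test having already installed a global subscriber.
        let _ = tracing_subscriber::fmt()
            .with_test_writer()
            .with_max_level(tracing::Level::DEBUG)
            .try_init();

        tracing::debug!("visible with --nocapture or when the test fails");
    }
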
@@ -1,7 +1,6 @@
 // Copyright: Ankitects Pty Ltd and contributors
 // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-pub use slog::{debug, Logger};
 pub use snafu::ResultExt;
 
 pub(crate) use crate::types::IntoNewtypeVec;

@@ -14,6 +14,7 @@ use serde::{Deserialize, Serialize};
 use serde_json::Value;
 use serde_tuple::Serialize_tuple;
 pub(crate) use server::{LocalServer, SyncServer};
+use tracing::debug;
 
 use crate::{
     card::{Card, CardQueue, CardType},
@@ -323,10 +324,10 @@ where
     }
 
     pub async fn sync(&mut self) -> Result<SyncOutput> {
-        debug!(self.col.log, "fetching meta...");
+        debug!("fetching meta...");
         self.fire_progress_cb(false);
         let state: SyncState = self.get_sync_state().await?;
-        debug!(self.col.log, "fetched"; "state"=>?&state);
+        debug!(?state, "fetched");
         match state.required {
             SyncActionRequired::NoChanges => Ok(state.into()),
             SyncActionRequired::FullSyncRequired { .. } => Ok(state.into()),
@@ -352,7 +353,7 @@ where
                 },
             } = &e
             {
-                debug!(self.col.log, "sanity check failed:\n{}", info);
+                debug!("sanity check failed:\n{}", info);
             }
 
             Err(e)
@@ -364,9 +365,9 @@ where
 
     async fn get_sync_state(&self) -> Result<SyncState> {
         let remote: SyncMeta = self.remote.meta().await?;
-        debug!(self.col.log, "remote {:?}", &remote);
+        debug!("remote {:?}", &remote);
         if !remote.should_continue {
-            debug!(self.col.log, "server says abort"; "message"=>&remote.server_message);
+            debug!(remote.server_message, "server says abort");
             return Err(AnkiError::sync_error(
                 remote.server_message,
                 SyncErrorKind::ServerMessage,
@@ -374,10 +375,10 @@ where
         }
 
         let local = self.col.sync_meta()?;
-        debug!(self.col.log, "local {:?}", &local);
+        debug!("local {:?}", &local);
         let delta = remote.current_time.0 - local.current_time.0;
         if delta.abs() > 300 {
-            debug!(self.col.log, "clock off"; "delta"=>delta);
+            debug!(delta, "clock off");
             return Err(AnkiError::sync_error("", SyncErrorKind::ClockIncorrect));
         }
 
@@ -390,21 +391,21 @@ where
         self.progress.stage = SyncStage::Syncing;
         self.fire_progress_cb(false);
 
-        debug!(self.col.log, "start");
+        debug!("start");
         self.start_and_process_deletions(&state).await?;
-        debug!(self.col.log, "unchunked changes");
+        debug!("unchunked changes");
         self.process_unchunked_changes(&state).await?;
-        debug!(self.col.log, "begin stream from server");
+        debug!("begin stream from server");
         self.process_chunks_from_server(&state).await?;
-        debug!(self.col.log, "begin stream to server");
+        debug!("begin stream to server");
         self.send_chunks_to_server(&state).await?;
 
         self.progress.stage = SyncStage::Finalizing;
         self.fire_progress_cb(false);
 
-        debug!(self.col.log, "sanity check");
+        debug!("sanity check");
         self.sanity_check().await?;
-        debug!(self.col.log, "finalize");
+        debug!("finalize");
         self.finalize(&state).await?;
         state.required = SyncActionRequired::NoChanges;
         Ok(state.into())
@@ -418,23 +419,27 @@ where
             .start(state.usn_at_last_sync, state.local_is_newer, None)
             .await?;
 
-        debug!(self.col.log, "removed on remote";
-            "cards"=>remote.cards.len(),
-            "notes"=>remote.notes.len(),
-            "decks"=>remote.decks.len());
+        debug!(
+            cards = remote.cards.len(),
+            notes = remote.notes.len(),
+            decks = remote.decks.len(),
+            "removed on remote"
+        );
 
         let mut local = self.col.storage.pending_graves(state.pending_usn)?;
         if let Some(new_usn) = state.new_usn {
             self.col.storage.update_pending_grave_usns(new_usn)?;
         }
 
-        debug!(self.col.log, "locally removed ";
-            "cards"=>local.cards.len(),
-            "notes"=>local.notes.len(),
-            "decks"=>local.decks.len());
+        debug!(
+            cards = local.cards.len(),
+            notes = local.notes.len(),
+            decks = local.decks.len(),
+            "locally removed "
+        );
 
         while let Some(chunk) = local.take_chunk() {
-            debug!(self.col.log, "sending graves chunk");
+            debug!("sending graves chunk");
             self.progress.local_remove += chunk.cards.len() + chunk.notes.len() + chunk.decks.len();
             self.remote.apply_graves(chunk).await?;
             self.fire_progress_cb(true);
@@ -443,7 +448,7 @@ where
         self.progress.remote_remove = remote.cards.len() + remote.notes.len() + remote.decks.len();
         self.col.apply_graves(remote, state.latest_usn)?;
         self.fire_progress_cb(true);
-        debug!(self.col.log, "applied server graves");
+        debug!("applied server graves");
 
         Ok(())
     }
@@ -453,18 +458,19 @@ where
     // in the future, like other objects. Syncing tags explicitly is also probably of limited
     // usefulness.
     async fn process_unchunked_changes(&mut self, state: &SyncState) -> Result<()> {
-        debug!(self.col.log, "gathering local changes");
+        debug!("gathering local changes");
         let local = self.col.local_unchunked_changes(
             state.pending_usn,
             state.new_usn,
             state.local_is_newer,
         )?;
 
-        debug!(self.col.log, "sending";
-            "notetypes"=>local.notetypes.len(),
-            "decks"=>local.decks_and_config.decks.len(),
-            "deck config"=>local.decks_and_config.config.len(),
-            "tags"=>local.tags.len(),
+        debug!(
+            notetypes = local.notetypes.len(),
+            decks = local.decks_and_config.decks.len(),
+            deck_config = local.decks_and_config.config.len(),
+            tags = local.tags.len(),
+            "sending"
         );
 
         self.progress.local_update += local.notetypes.len()
@@ -474,11 +480,12 @@ where
         let remote = self.remote.apply_changes(local).await?;
         self.fire_progress_cb(true);
 
-        debug!(self.col.log, "received";
-            "notetypes"=>remote.notetypes.len(),
-            "decks"=>remote.decks_and_config.decks.len(),
-            "deck config"=>remote.decks_and_config.config.len(),
-            "tags"=>remote.tags.len(),
+        debug!(
+            notetypes = remote.notetypes.len(),
+            decks = remote.decks_and_config.decks.len(),
+            deck_config = remote.decks_and_config.config.len(),
+            tags = remote.tags.len(),
+            "received"
         );
 
         self.progress.remote_update += remote.notetypes.len()
@@ -495,11 +502,12 @@ where
         loop {
             let chunk: Chunk = self.remote.chunk().await?;
 
-            debug!(self.col.log, "received";
-                "done"=>chunk.done,
-                "cards"=>chunk.cards.len(),
-                "notes"=>chunk.notes.len(),
-                "revlog"=>chunk.revlog.len(),
+            debug!(
+                done = chunk.done,
+                cards = chunk.cards.len(),
+                notes = chunk.notes.len(),
+                revlog = chunk.revlog.len(),
+                "received"
             );
 
             self.progress.remote_update +=
@@ -523,11 +531,12 @@ where
             let chunk: Chunk = self.col.get_chunk(&mut ids, state.new_usn)?;
             let done = chunk.done;
 
-            debug!(self.col.log, "sending";
-                "done"=>chunk.done,
-                "cards"=>chunk.cards.len(),
-                "notes"=>chunk.notes.len(),
-                "revlog"=>chunk.revlog.len(),
+            debug!(
+                done = chunk.done,
+                cards = chunk.cards.len(),
+                notes = chunk.notes.len(),
+                revlog = chunk.revlog.len(),
+                "sending"
            );
 
            self.progress.local_update +=
@@ -548,12 +557,9 @@ where
         let mut local_counts = self.col.storage.sanity_check_info()?;
         self.col.add_due_counts(&mut local_counts.counts)?;
 
-        debug!(
-            self.col.log,
-            "gathered local counts; waiting for server reply"
-        );
+        debug!("gathered local counts; waiting for server reply");
         let out: SanityCheckResponse = self.remote.sanity_check(local_counts).await?;
-        debug!(self.col.log, "got server reply");
+        debug!("got server reply");
         if out.status != SanityCheckStatus::Ok {
             Err(AnkiError::sync_error(
                 format!("local {:?}\nremote {:?}", out.client, out.server),
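One payoff of converting the `"key"=>value` pairs above into real tracing fields, rather than folding everything into the message string, is that the fields survive into structured output. A sketch assuming tracing-subscriber's optional json feature (not part of this diff; the output shape is approximate):

    fn main() {
        // With the JSON formatter, fields stay machine-readable, e.g.
        // {"fields":{"message":"removed on remote","cards":3,"notes":0,"decks":1},...}
        tracing_subscriber::fmt()
            .json()
            .with_max_level(tracing::Level::DEBUG)
            .init();

        let (cards, notes, decks) = (3, 0, 1);
        tracing::debug!(cards, notes, decks, "removed on remote");
    }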