diff --git a/ftl/core/preferences.ftl b/ftl/core/preferences.ftl
index efaa63075..2d4523204 100644
--- a/ftl/core/preferences.ftl
+++ b/ftl/core/preferences.ftl
@@ -44,8 +44,9 @@ preferences-theme-dark = Dark
 preferences-v3-scheduler = V3 scheduler
 preferences-ignore-accents-in-search = Ignore accents in search (slower)
 preferences-backup-explanation =
-    Anki periodically backs up your collection when it is closed. After backups are more than 2 days old,
+    Anki periodically backs up your collection. After backups are more than 2 days old,
     Anki will start removing some of them to free up disk space.
 preferences-daily-backups = Daily backups to keep:
 preferences-weekly-backups = Weekly backups to keep:
 preferences-monthly-backups = Monthly backups to keep:
+preferences-minutes-between-backups = Minutes between automatic backups:
diff --git a/ftl/core/profiles.ftl b/ftl/core/profiles.ftl
index 8347ece7e..bf2725443 100644
--- a/ftl/core/profiles.ftl
+++ b/ftl/core/profiles.ftl
@@ -13,3 +13,7 @@ profiles-could-not-create-data-folder = Anki could not create its data folder. P
 profiles-prefs-corrupt-title = Preferences Corrupt
 profiles-prefs-file-is-corrupt = Anki's prefs21.db file was corrupt and has been recreated. If you were using multiple profiles, please add them back using the same names to recover your cards.
 profiles-profile-does-not-exist = Requested profile does not exist.
+profiles-creating-backup = Creating Backup...
+profiles-backup-created = Backup created.
+profiles-backup-creation-failed = Backup creation failed: { $reason }
+profiles-backup-unchanged = No changes since latest backup.
diff --git a/ftl/qt/qt-accel.ftl b/ftl/qt/qt-accel.ftl
index 199c77387..27d421f99 100644
--- a/ftl/qt/qt-accel.ftl
+++ b/ftl/qt/qt-accel.ftl
@@ -40,3 +40,4 @@ qt-accel-zoom-out = Zoom &Out
 qt-accel-reset-zoom = &Reset Zoom
 qt-accel-zoom-editor-in = Zoom Editor &In
 qt-accel-zoom-editor-out = Zoom Editor &Out
+qt-accel-create-backup = Create &Backup
diff --git a/proto/anki/collection.proto b/proto/anki/collection.proto
index f1552ccbd..d143a0012 100644
--- a/proto/anki/collection.proto
+++ b/proto/anki/collection.proto
@@ -18,6 +18,13 @@ service CollectionService {
   rpc MergeUndoEntries(generic.UInt32) returns (OpChanges);
   rpc LatestProgress(generic.Empty) returns (Progress);
   rpc SetWantsAbort(generic.Empty) returns (generic.Empty);
+  // Create a no-media backup. Caller must ensure there is no active
+  // transaction. Unlike a collection export, does not require reopening the DB,
+  // as there is no downgrade step.
+  // Returns false if it's not time to make a backup yet.
+  rpc CreateBackup(CreateBackupRequest) returns (generic.Bool);
+  // If a backup is running, wait for it to complete. Will return an error
+  // if the backup encountered an error.
   rpc AwaitBackupCompletion(generic.Empty) returns (generic.Empty);
 }
 
@@ -30,11 +37,6 @@ message OpenCollectionRequest {
 
 message CloseCollectionRequest {
   bool downgrade_to_schema11 = 1;
-  // Skip backup if unset
-  optional string backup_folder = 2;
-  // Skip backup if one has been created in the last x seconds.
-  // If unset a default value is used.
-  optional uint64 minimum_backup_interval = 3;
 }
 
 message CheckDatabaseResponse {
@@ -117,3 +119,10 @@ message Progress {
     uint32 exporting = 8;
   }
 }
+
+message CreateBackupRequest {
+  string backup_folder = 1;
+  // Create a backup even if the configured interval hasn't elapsed yet.
+  bool force = 2;
+  bool wait_for_completion = 3;
+}
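The two RPCs above are designed to be used as a pair: `CreateBackup` starts the work (optionally blocking), and `AwaitBackupCompletion` later surfaces any error from the background thread. A rough sketch of that calling pattern — `backend` is assumed to be the generated RustBackend wrapper, with keyword arguments mirroring the `CreateBackupRequest` fields:

```python
# Sketch: driving the CreateBackup/AwaitBackupCompletion pair.
def start_backup(backend, folder: str) -> bool:
    # Returns False when the configured minimum interval hasn't elapsed.
    return backend.create_backup(
        backup_folder=folder,
        force=False,
        wait_for_completion=False,
    )

def confirm_backup(backend) -> None:
    # Raises if the background backup reported an error.
    backend.await_backup_completion()
```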
diff --git a/proto/anki/config.proto b/proto/anki/config.proto
index 8c793d6d9..90423518e 100644
--- a/proto/anki/config.proto
+++ b/proto/anki/config.proto
@@ -117,14 +117,15 @@ message Preferences {
     string default_search_text = 4;
     bool ignore_accents_in_search = 5;
   }
-  message Backups {
+  message BackupLimits {
     uint32 daily = 1;
     uint32 weekly = 2;
     uint32 monthly = 3;
+    uint32 minimum_interval_mins = 4;
   }
 
   Scheduling scheduling = 1;
   Reviewing reviewing = 2;
   Editing editing = 3;
-  Backups backups = 4;
+  BackupLimits backups = 4;
 }
diff --git a/pylib/anki/collection.py b/pylib/anki/collection.py
index 65ed984b3..20ef2a8af 100644
--- a/pylib/anki/collection.py
+++ b/pylib/anki/collection.py
@@ -239,8 +239,6 @@ class Collection(DeprecatedNamesMixin):
         self,
         save: bool = True,
         downgrade: bool = False,
-        backup_folder: str | None = None,
-        minimum_backup_interval: int | None = None,
     ) -> None:
         "Disconnect from DB."
         if self.db:
@@ -249,12 +247,9 @@
             else:
                 self.db.rollback()
             self._clear_caches()
-            request = collection_pb2.CloseCollectionRequest(
+            self._backend.close_collection(
                 downgrade_to_schema11=downgrade,
-                backup_folder=backup_folder,
-                minimum_backup_interval=minimum_backup_interval,
             )
-            self._backend.close_collection(request)
         self.db = None
 
     def close_for_full_sync(self) -> None:
@@ -326,6 +321,44 @@ class Collection(DeprecatedNamesMixin):
         else:
             return -1
 
+    def create_backup(
+        self,
+        *,
+        backup_folder: str,
+        force: bool,
+        wait_for_completion: bool,
+    ) -> bool:
+        """Create a backup if enough time has elapsed, and rotate old backups.
+
+        If `force` is true, the user's configured backup interval is ignored.
+        Returns true if backup created. This may be false in the force=True case,
+        if no changes have been made to the collection.
+
+        Commits any outstanding changes, which clears any active legacy checkpoint.
+
+        Throws on failure of current backup, or the previous backup if it was not
+        awaited.
+        """
+        # ensure any pending transaction from legacy code/add-ons has been committed
+        self.save(trx=False)
+        created = self._backend.create_backup(
+            backup_folder=backup_folder,
+            force=force,
+            wait_for_completion=wait_for_completion,
+        )
+        self.db.begin()
+        return created
+
+    def await_backup_completion(self) -> None:
+        "Throws if backup creation failed."
+        self._backend.await_backup_completion()
+
+    def legacy_checkpoint_pending(self) -> bool:
+        return (
+            self._have_outstanding_checkpoint()
+            and time.time() - self._last_checkpoint_at < 300
+        )
+
     # Object helpers
     ##########################################################################
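A minimal usage sketch of the new pylib API (hypothetical paths; the semantics follow the docstring above):

```python
from anki.collection import Collection

col = Collection("collection.anki2")  # hypothetical path
# Force a backup, ignoring the configured interval. Even with
# force=True, this returns False when nothing has changed since
# the previous backup.
created = col.create_backup(
    backup_folder="backups",
    force=True,
    wait_for_completion=True,
)
print("backup written" if created else "collection unchanged")
col.close()
```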
diff --git a/qt/aqt/dbcheck.py b/qt/aqt/dbcheck.py
index e5143c43d..775abbece 100644
--- a/qt/aqt/dbcheck.py
+++ b/qt/aqt/dbcheck.py
@@ -32,6 +32,10 @@ def check_db(mw: aqt.AnkiQt) -> None:
     qconnect(timer.timeout, on_timer)
     timer.start(100)
 
+    def do_check() -> tuple[str, bool]:
+        mw.create_backup_now()
+        return mw.col.fix_integrity()
+
     def on_future_done(fut: Future) -> None:
         timer.stop()
         ret, ok = fut.result()
@@ -54,4 +58,4 @@
             n += 1
             continue
 
-    mw.taskman.with_progress(mw.col.fix_integrity, on_future_done)
+    mw.taskman.with_progress(do_check, on_future_done)
diff --git a/qt/aqt/forms/main.ui b/qt/aqt/forms/main.ui
index 5f7482d12..b80137aa6 100644
[The XML hunks for this file were garbled in extraction (element tags lost). Recoverable changes: the menu bar height grows from 22 to 24, and a new menu action is added and bound to the qt_accel_create_backup string, alongside the existing zoom actions (Ctrl+0 etc.).]
diff --git a/qt/aqt/forms/preferences.ui b/qt/aqt/forms/preferences.ui
index bbad22d30..f6526e132 100644
[Likewise garbled XML. Recoverable changes: the backup spin boxes (maximum 9999) are rearranged; a minutes_between_backups spin box is added (the stray 5/9999 values suggest a default or minimum of 5 and a maximum of 9999), with preferences_minutes_between_backups as its buddy label; and the tab order becomes minutes_between_backups, daily_backups, weekly_backups, monthly_backups.]
diff --git a/qt/aqt/importing.py b/qt/aqt/importing.py
index 7e5f65c64..c45570486 100644
--- a/qt/aqt/importing.py
+++ b/qt/aqt/importing.py
@@ -15,6 +15,7 @@ from anki.errors import Interrupted
 from anki.importing.anki2 import V2ImportIntoV1
 from anki.importing.apkg import AnkiPackageImporter
 from aqt import AnkiQt, gui_hooks
+from aqt.operations import QueryOp
 from aqt.qt import *
 from aqt.utils import (
     HelpPage,
@@ -444,13 +445,18 @@ def setupApkgImport(mw: AnkiQt, importer: AnkiPackageImporter) -> bool:
     return False
 
 
-def full_apkg_import(mw: aqt.AnkiQt, file: str) -> None:
+def full_apkg_import(mw: AnkiQt, file: str) -> None:
     def on_done(success: bool) -> None:
         mw.loadCollection()
         if success:
             tooltip(tr.importing_importing_complete())
 
-    mw.unloadCollection(lambda: replace_with_apkg(mw, file, on_done))
+    def after_backup(created: bool) -> None:
+        mw.unloadCollection(lambda: replace_with_apkg(mw, file, on_done))
+
+    QueryOp(
+        parent=mw, op=lambda _: mw.create_backup_now(), success=after_backup
+    ).with_progress().run_in_background()
 
 
 def replace_with_apkg(
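Both GUI call sites follow the same shape: take a forced, synchronous backup first, then run the destructive operation. Schematically (a sketch, not code from the patch; `op` stands for the destructive step — integrity check, apkg import, and so on):

```python
# Schematic of the backup-before-destructive-op pattern used in
# dbcheck.py and importing.py. `mw` is the Anki main window.
from concurrent.futures import Future

def run_with_backup(mw, op, on_done) -> None:
    def wrapped():
        mw.create_backup_now()  # blocks until the backup completes
        return op()

    def finished(fut: Future) -> None:
        on_done(fut.result())  # re-raises any backup or op error

    mw.taskman.with_progress(wrapped, finished)
```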
diff --git a/qt/aqt/main.py b/qt/aqt/main.py
index cbcaa0666..cfc7f2279 100644
--- a/qt/aqt/main.py
+++ b/qt/aqt/main.py
@@ -25,7 +25,7 @@ import aqt.sound
 import aqt.stats
 import aqt.toolbar
 import aqt.webview
-from anki import collection_pb2, hooks
+from anki import hooks
 from anki._backend import RustBackend as _RustBackend
 from anki.collection import Collection, Config, OpChanges, UndoStatus
 from anki.decks import DeckDict, DeckId
@@ -50,6 +50,7 @@ from aqt.flags import FlagManager
 from aqt.legacy import install_pylib_legacy
 from aqt.mediacheck import check_media_db
 from aqt.mediasync import MediaSyncer
+from aqt.operations import QueryOp
 from aqt.operations.collection import redo, undo
 from aqt.operations.deck import set_current_deck
 from aqt.profiles import ProfileManager as ProfileManagerType
@@ -547,10 +548,7 @@ class AnkiQt(QMainWindow):
         )
         # clean up open collection if possible
         try:
-            request = collection_pb2.CloseCollectionRequest(
-                downgrade_to_schema11=False, backup_folder=None
-            )
-            self.backend.close_collection(request)
+            self.backend.close_collection(downgrade_to_schema11=False)
         except Exception as e:
             print("unable to close collection:", e)
         self.col = None
@@ -612,12 +610,15 @@ class AnkiQt(QMainWindow):
         except:
             corrupt = True
 
-        if corrupt or dev_mode or self.restoring_backup:
-            backup_folder = None
-        else:
-            backup_folder = self.pm.backupFolder()
         try:
-            self.col.close(downgrade=False, backup_folder=backup_folder)
+            if not corrupt and not dev_mode and not self.restoring_backup:
+                # default 5 minute throttle
+                self.col.create_backup(
+                    backup_folder=self.pm.backupFolder(),
+                    force=False,
+                    wait_for_completion=False,
+                )
+            self.col.close(downgrade=False)
         except Exception as e:
             print(e)
             corrupt = True
@@ -630,11 +631,7 @@ class AnkiQt(QMainWindow):
 
     def _close_for_full_download(self) -> None:
         "Backup and prepare collection to be overwritten."
-        backup_folder = None if dev_mode else self.pm.backupFolder()
-        self.col.close(
-            downgrade=False, backup_folder=backup_folder, minimum_backup_interval=0
-        )
-        self.col.reopen(after_full_sync=False)
+        self.create_backup_now()
         self.col.close_for_full_sync()
 
     def apply_collection_options(self) -> None:
@@ -1230,6 +1227,7 @@ title="{}" {}>{}""".format(
         )
         qconnect(m.actionImport.triggered, self.onImport)
         qconnect(m.actionExport.triggered, self.onExport)
+        qconnect(m.action_create_backup.triggered, self.on_create_backup_now)
         qconnect(m.actionExit.triggered, self.close)
 
         # Help
@@ -1329,6 +1327,13 @@ title="{}" {}>{}""".format(
         # ensure Python interpreter runs at least once per second, so that
         # SIGINT/SIGTERM is processed without a long delay
         self.progress.timer(1000, lambda: None, True, False, parent=self)
+        # periodic backups are checked every 5 minutes
+        self.progress.timer(
+            5 * 60 * 1000,
+            self.on_periodic_backup_timer,
+            True,
+            parent=self,
+        )
 
     def onRefreshTimer(self) -> None:
         if self.state == "deckBrowser":
@@ -1344,6 +1349,69 @@ title="{}" {}>{}""".format(
         if elap > minutes * 60:
             self.maybe_auto_sync_media()
 
+    # Backups
+    ##########################################################################
+
+    def on_periodic_backup_timer(self) -> None:
+        """Create a backup if enough time has elapsed and collection changed."""
+        self._create_backup_with_progress(user_initiated=False)
+
+    def on_create_backup_now(self) -> None:
+        self._create_backup_with_progress(user_initiated=True)
+
+    def create_backup_now(self) -> None:
+        """Create a backup immediately, regardless of when the last one was created.
+        Waits until the backup completes. Intended to be used as part of a longer-running
+        CollectionOp/QueryOp."""
+        self.col.create_backup(
+            backup_folder=self.pm.backupFolder(),
+            force=True,
+            wait_for_completion=True,
+        )
+
+    def _create_backup_with_progress(self, user_initiated: bool) -> None:
+        # if there's a legacy undo op, try again later
+        if not user_initiated and self.col.legacy_checkpoint_pending():
+            return
+
+        # The initial copy will display a progress window if it takes too long
+        def backup(col: Collection) -> bool:
+            return col.create_backup(
+                backup_folder=self.pm.backupFolder(),
+                force=user_initiated,
+                wait_for_completion=False,
+            )
+
+        def on_success(val: None) -> None:
+            if user_initiated:
+                tooltip(tr.profiles_backup_created(), parent=self)
+
+        def on_failure(exc: Exception) -> None:
+            showWarning(
+                tr.profiles_backup_creation_failed(reason=str(exc)), parent=self
+            )
+
+        def after_backup_started(created: bool) -> None:
+            # Legacy checkpoint may have expired.
+            self.update_undo_actions()
+
+            if user_initiated and not created:
+                tooltip(tr.profiles_backup_unchanged(), parent=self)
+                return
+
+            # We await backup completion to confirm it was successful, but this step
+            # does not block collection access, so we don't need to show the progress
+            # window anymore.
+            QueryOp(
+                parent=self,
+                op=lambda col: col.await_backup_completion(),
+                success=on_success,
+            ).failure(on_failure).run_in_background()
+
+        QueryOp(parent=self, op=backup, success=after_backup_started).failure(
+            on_failure
+        ).with_progress(tr.profiles_creating_backup()).run_in_background()
+
     # Permanent hooks
     ##########################################################################
diff --git a/qt/aqt/preferences.py b/qt/aqt/preferences.py
index a921b8d62..5489e8707 100644
--- a/qt/aqt/preferences.py
+++ b/qt/aqt/preferences.py
@@ -103,6 +103,7 @@ class Preferences(QDialog):
         form.daily_backups.setValue(self.prefs.backups.daily)
         form.weekly_backups.setValue(self.prefs.backups.weekly)
         form.monthly_backups.setValue(self.prefs.backups.monthly)
+        form.minutes_between_backups.setValue(self.prefs.backups.minimum_interval_mins)
 
     def update_collection(self, on_done: Callable[[], None]) -> None:
         form = self.form
@@ -133,6 +134,7 @@
         self.prefs.backups.daily = form.daily_backups.value()
         self.prefs.backups.weekly = form.weekly_backups.value()
         self.prefs.backups.monthly = form.monthly_backups.value()
+        self.prefs.backups.minimum_interval_mins = form.minutes_between_backups.value()
 
         def after_prefs_update(changes: OpChanges) -> None:
             self.mw.apply_collection_options()
diff --git a/rslib/build/protobuf.rs b/rslib/build/protobuf.rs
index 246d003f9..1472b0a09 100644
--- a/rslib/build/protobuf.rs
+++ b/rslib/build/protobuf.rs
@@ -107,7 +107,7 @@ pub fn write_backend_proto_rs() {
         )
         .type_attribute("HelpPageLinkRequest.HelpPage", "#[derive(strum::EnumIter)]")
         .type_attribute(
-            "Preferences.Backups",
+            "Preferences.BackupLimits",
            "#[derive(Copy, serde_derive::Deserialize, serde_derive::Serialize)]",
         )
         .compile_protos(paths.as_slice(), &[proto_dir])
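Because `Preferences.BackupLimits` keeps the serde derives registered above, the limits round-trip through the collection config as JSON. The stored value presumably has this shape (illustrative only; the values are the defaults from rslib/src/config/mod.rs further down):

```python
# Illustrative JSON shape of the persisted BackupLimits config value.
backup_limits = {
    "daily": 12,
    "weekly": 10,
    "monthly": 9,
    "minimum_interval_mins": 30,
}
```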
diff --git a/rslib/src/backend/collection.rs b/rslib/src/backend/collection.rs
index f328a49df..4832b9445 100644
--- a/rslib/src/backend/collection.rs
+++ b/rslib/src/backend/collection.rs
@@ -1,7 +1,7 @@
 // Copyright: Ankitects Pty Ltd and contributors
 // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-use std::{path::Path, sync::MutexGuard};
+use std::sync::MutexGuard;
 
 use slog::error;
 
@@ -9,9 +9,8 @@ use super::{progress::Progress, Backend};
 pub(super) use crate::backend_proto::collection_service::Service as CollectionService;
 use crate::{
     backend::progress::progress_to_proto,
-    backend_proto::{self as pb, preferences::Backups},
-    collection::{backup, CollectionBuilder},
-    log::{self},
+    backend_proto::{self as pb},
+    collection::CollectionBuilder,
     prelude::*,
     storage::SchemaVersion,
 };
@@ -47,28 +46,18 @@ impl CollectionService for Backend {
     }
 
     fn close_collection(&self, input: pb::CloseCollectionRequest) -> Result<pb::Empty> {
+        let desired_version = if input.downgrade_to_schema11 {
+            Some(SchemaVersion::V11)
+        } else {
+            None
+        };
+
         self.abort_media_sync_and_wait();
-
         let mut guard = self.lock_open_collection()?;
+        let col_inner = guard.take().unwrap();
 
-        let mut col_inner = guard.take().unwrap();
-        let limits = col_inner.get_backups();
-        let col_path = std::mem::take(&mut col_inner.col_path);
-
-        if input.downgrade_to_schema11 {
-            let log = log::terminal();
-            if let Err(e) = col_inner.close(Some(SchemaVersion::V11)) {
-                error!(log, " failed: {:?}", e);
-            }
-        }
-
-        if let Some(backup_folder) = input.backup_folder {
-            self.start_backup(
-                col_path,
-                backup_folder,
-                limits,
-                input.minimum_backup_interval,
-            )?;
+        if let Err(e) = col_inner.close(desired_version) {
+            error!(self.log, " failed: {:?}", e);
         }
 
         Ok(().into())
@@ -109,8 +98,32 @@
             .map(Into::into)
     }
 
+    fn create_backup(&self, input: pb::CreateBackupRequest) -> Result<pb::Bool> {
+        // lock collection
+        let mut col_lock = self.lock_open_collection()?;
+        let col = col_lock.as_mut().unwrap();
+        // await any previous backup first
+        let mut task_lock = self.backup_task.lock().unwrap();
+        if let Some(task) = task_lock.take() {
+            task.join().unwrap()?;
+        }
+        // start the new backup
+        let created = if let Some(task) = col.maybe_backup(input.backup_folder, input.force)? {
+            if input.wait_for_completion {
+                drop(col_lock);
+                task.join().unwrap()?;
+            } else {
+                *task_lock = Some(task);
+            }
+            true
+        } else {
+            false
+        };
+        Ok(created.into())
+    }
+
     fn await_backup_completion(&self, _input: pb::Empty) -> Result<pb::Empty> {
-        self.await_backup_completion();
+        self.await_backup_completion()?;
         Ok(().into())
     }
 }
@@ -132,29 +145,10 @@ impl Backend {
             .ok_or(AnkiError::CollectionAlreadyOpen)
     }
 
-    fn await_backup_completion(&self) {
+    fn await_backup_completion(&self) -> Result<()> {
         if let Some(task) = self.backup_task.lock().unwrap().take() {
-            task.join().unwrap();
+            task.join().unwrap()?;
         }
-    }
-
-    fn start_backup(
-        &self,
-        col_path: impl AsRef<Path>,
-        backup_folder: impl AsRef<Path> + Send + 'static,
-        limits: Backups,
-        minimum_backup_interval: Option<u64>,
-    ) -> Result<()> {
-        self.await_backup_completion();
-        *self.backup_task.lock().unwrap() = backup::backup(
-            col_path,
-            backup_folder,
-            limits,
-            minimum_backup_interval,
-            self.log.clone(),
-            self.tr.clone(),
-        )?;
-        Ok(())
+        Ok(())
     }
 }
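The `backup_task` slot gives failed background backups a deferred-error contract: the error is re-raised on the next `create_backup` or `await_backup_completion` call rather than being lost. A Python analogue of that bookkeeping (hypothetical class; a Future plays the role of the Rust JoinHandle):

```python
# Python analogue of the backup_task handling in create_backup().
from __future__ import annotations

from concurrent.futures import Future, ThreadPoolExecutor

class BackupTaskSlot:
    def __init__(self) -> None:
        self._pool = ThreadPoolExecutor(max_workers=1)
        self._task: Future | None = None

    def start(self, work) -> None:
        # Await any previous backup first; raises if it failed.
        self.await_completion()
        self._task = self._pool.submit(work)

    def await_completion(self) -> None:
        if self._task is not None:
            task, self._task = self._task, None
            task.result()  # re-raises any exception from the worker
```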
diff --git a/rslib/src/backend/mod.rs b/rslib/src/backend/mod.rs
index ca1805ec8..b4ec28cfa 100644
--- a/rslib/src/backend/mod.rs
+++ b/rslib/src/backend/mod.rs
@@ -78,7 +78,7 @@ pub struct Backend {
     runtime: OnceCell<Runtime>,
     log: Logger,
     state: Arc<Mutex<BackendState>>,
-    backup_task: Arc<Mutex<Option<JoinHandle<()>>>>,
+    backup_task: Arc<Mutex<Option<JoinHandle<Result<()>>>>>,
 }
 
 #[derive(Default)]
diff --git a/rslib/src/collection/backup.rs b/rslib/src/collection/backup.rs
index 40126ab9c..5244d1b93 100644
--- a/rslib/src/collection/backup.rs
+++ b/rslib/src/collection/backup.rs
@@ -14,34 +14,53 @@ use itertools::Itertools;
 use log::error;
 
 use crate::{
-    backend_proto::preferences::Backups, import_export::package::export_colpkg_from_data, log,
+    backend_proto::preferences::BackupLimits, import_export::package::export_colpkg_from_data, log,
     prelude::*,
 };
 
 const BACKUP_FORMAT_STRING: &str = "backup-%Y-%m-%d-%H.%M.%S.colpkg";
-/// Default seconds after a backup, in which further backups will be skipped.
-const MINIMUM_BACKUP_INTERVAL: u64 = 5 * 60;
 
-pub fn backup(
-    col_path: impl AsRef<Path>,
-    backup_folder: impl AsRef<Path> + Send + 'static,
-    limits: Backups,
-    minimum_backup_interval: Option<u64>,
-    log: Logger,
-    tr: I18n,
-) -> Result<Option<JoinHandle<()>>> {
-    let recent_secs = minimum_backup_interval.unwrap_or(MINIMUM_BACKUP_INTERVAL);
-    if recent_secs > 0 && has_recent_backup(backup_folder.as_ref(), recent_secs)? {
-        Ok(None)
-    } else {
-        let col_data = std::fs::read(col_path)?;
-        Ok(Some(thread::spawn(move || {
-            backup_inner(&col_data, &backup_folder, limits, log, &tr)
-        })))
+impl Collection {
+    /// Create a backup if enough time has elapsed, or if forced.
+    /// Returns a handle that can be awaited if a backup was created.
+    pub fn maybe_backup(
+        &mut self,
+        backup_folder: impl AsRef<Path> + Send + 'static,
+        force: bool,
+    ) -> Result<Option<JoinHandle<Result<()>>>> {
+        if !self.changed_since_last_backup()? {
+            return Ok(None);
+        }
+        let limits = self.get_backup_limits();
+        if should_skip_backup(force, limits.minimum_interval_mins, backup_folder.as_ref())? {
+            Ok(None)
+        } else {
+            let log = self.log.clone();
+            let tr = self.tr.clone();
+            self.storage.checkpoint()?;
+            let col_data = std::fs::read(&self.col_path)?;
+            self.update_last_backup_timestamp()?;
+            Ok(Some(thread::spawn(move || {
+                backup_inner(&col_data, &backup_folder, limits, log, &tr)
+            })))
+        }
     }
 }
 
-fn has_recent_backup(backup_folder: &Path, recent_secs: u64) -> Result<bool> {
+fn should_skip_backup(
+    force: bool,
+    minimum_interval_mins: u32,
+    backup_folder: &Path,
+) -> Result<bool> {
+    if force {
+        Ok(false)
+    } else {
+        has_recent_backup(backup_folder, minimum_interval_mins)
+    }
+}
+
+fn has_recent_backup(backup_folder: &Path, recent_mins: u32) -> Result<bool> {
+    let recent_secs = (recent_mins * 60) as u64;
     let now = SystemTime::now();
     Ok(read_dir(backup_folder)?
         .filter_map(|res| res.ok())
@@ -54,16 +73,12 @@ fn has_recent_backup(backup_folder: &Path, recent_secs: u64) -> Result<bool> {
 
 fn backup_inner<P: AsRef<Path>>(
     col_data: &[u8],
     backup_folder: P,
-    limits: Backups,
+    limits: BackupLimits,
     log: Logger,
     tr: &I18n,
-) {
-    if let Err(error) = write_backup(col_data, backup_folder.as_ref(), tr) {
-        error!(log, "failed to backup collection: {error:?}");
-    }
-    if let Err(error) = thin_backups(backup_folder, limits, &log) {
-        error!(log, "failed to thin backups: {error:?}");
-    }
+) -> Result<()> {
+    write_backup(col_data, backup_folder.as_ref(), tr)?;
+    thin_backups(backup_folder, limits, &log)
 }
 
@@ -72,7 +87,11 @@ fn write_backup<S: AsRef<Path>>(col_data: &[u8], backup_folder: S, tr: &I18n) -
     export_colpkg_from_data(&out_path, col_data, tr)
 }
 
-fn thin_backups<P: AsRef<Path>>(backup_folder: P, limits: Backups, log: &Logger) -> Result<()> {
+fn thin_backups<P: AsRef<Path>>(
+    backup_folder: P,
+    limits: BackupLimits,
+    log: &Logger,
+) -> Result<()> {
     let backups =
         read_dir(backup_folder)?.filter_map(|entry| entry.ok().and_then(Backup::from_entry));
     let obsolete_backups = BackupFilter::new(Local::today(), limits).obsolete_backups(backups);
@@ -135,7 +154,7 @@ struct BackupFilter {
     last_kept_day: i32,
     last_kept_week: i32,
     last_kept_month: u32,
-    limits: Backups,
+    limits: BackupLimits,
     obsolete: Vec<Backup>,
 }
 
@@ -147,7 +166,7 @@ enum BackupStage {
 }
 
 impl BackupFilter {
-    fn new(today: Date<Local>, limits: Backups) -> Self {
+    fn new(today: Date<Local>, limits: BackupLimits) -> Self {
         Self {
             yesterday: today.num_days_from_ce() - 1,
             last_kept_day: i32::MAX,
@@ -257,10 +276,11 @@ mod test {
     #[test]
     fn thinning_manual() {
         let today = Local.ymd(2022, 2, 22);
-        let limits = Backups {
+        let limits = BackupLimits {
             daily: 3,
             weekly: 2,
             monthly: 1,
+            ..Default::default()
         };
 
         // true => should be removed
@@ -300,11 +320,12 @@ mod test {
     fn thinning_generic() {
         let today = Local.ymd(2022, 1, 1);
         let today_ce_days = today.num_days_from_ce();
-        let limits = Backups {
+        let limits = BackupLimits {
             // config defaults
             daily: 12,
             weekly: 10,
             monthly: 9,
+            ..Default::default()
         };
         let backups: Vec<_> = (1..366).map(|i| backup!(today_ce_days - i)).collect();
         let mut expected = Vec::new();
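`should_skip_backup` and `has_recent_backup` decide purely from file modification times in the backup folder. Roughly, in Python terms (a sketch of the logic, not a line-for-line translation):

```python
# Rough equivalent of should_skip_backup()/has_recent_backup():
# never skip when forced; otherwise skip if any file in the backup
# folder was modified within the minimum interval.
import os
import time

def should_skip_backup(force: bool, minimum_interval_mins: int, folder: str) -> bool:
    if force:
        return False
    cutoff = time.time() - minimum_interval_mins * 60
    return any(
        entry.stat().st_mtime > cutoff
        for entry in os.scandir(folder)
        if entry.is_file()
    )
```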
diff --git a/rslib/src/collection/mod.rs b/rslib/src/collection/mod.rs
index 9be3086fc..e1222804f 100644
--- a/rslib/src/collection/mod.rs
+++ b/rslib/src/collection/mod.rs
@@ -17,6 +17,7 @@ use crate::{
     notetype::{Notetype, NotetypeId},
     scheduler::{queue::CardQueues, SchedulerInfo},
     storage::{SchemaVersion, SqliteStorage},
+    timestamp::TimestampMillis,
     types::Usn,
     undo::UndoManager,
 };
@@ -116,6 +117,9 @@ pub struct CollectionState {
     /// True if legacy Python code has executed SQL that has modified the
     /// database, requiring modification time to be bumped.
     pub(crate) modified_by_dbproxy: bool,
+    /// The modification time at the last backup, so we don't create multiple
+    /// identical backups.
+    pub(crate) last_backup_modified: Option<TimestampMillis>,
 }
 
 pub struct Collection {
diff --git a/rslib/src/collection/timestamps.rs b/rslib/src/collection/timestamps.rs
index a813e4436..9f1615ba2 100644
--- a/rslib/src/collection/timestamps.rs
+++ b/rslib/src/collection/timestamps.rs
@@ -29,4 +29,19 @@ impl Collection {
         let stamps = self.storage.get_collection_timestamps()?;
         self.set_schema_modified_time_undoable(TimestampMillis::now(), stamps.schema_change)
     }
+
+    pub(crate) fn changed_since_last_backup(&self) -> Result<bool> {
+        let stamps = self.storage.get_collection_timestamps()?;
+        Ok(self
+            .state
+            .last_backup_modified
+            .map(|last_backup| last_backup != stamps.collection_change)
+            .unwrap_or(true))
+    }
+
+    pub(crate) fn update_last_backup_timestamp(&mut self) -> Result<()> {
+        self.state.last_backup_modified =
+            Some(self.storage.get_collection_timestamps()?.collection_change);
+        Ok(())
+    }
 }
diff --git a/rslib/src/config/mod.rs b/rslib/src/config/mod.rs
index c4c8ccc4c..6668b382a 100644
--- a/rslib/src/config/mod.rs
+++ b/rslib/src/config/mod.rs
@@ -16,7 +16,7 @@ use strum::IntoStaticStr;
 pub use self::{
     bool::BoolKey, deck::DeckConfigKey, notetype::get_aux_notetype_config_key, string::StringKey,
 };
-use crate::{backend_proto::preferences::Backups, prelude::*};
+use crate::{backend_proto::preferences::BackupLimits, prelude::*};
 
 /// Only used when updating/undoing.
 #[derive(Debug)]
@@ -266,18 +266,19 @@ impl Collection {
             .map(|_| ())
     }
 
-    pub(crate) fn get_backups(&self) -> Backups {
+    pub(crate) fn get_backup_limits(&self) -> BackupLimits {
         self.get_config_optional(ConfigKey::Backups).unwrap_or(
             // 2d + 12d + 10w + 9m ≈ 1y
-            Backups {
+            BackupLimits {
                 daily: 12,
                 weekly: 10,
                 monthly: 9,
+                minimum_interval_mins: 30,
             },
         )
     }
 
-    pub(crate) fn set_backups(&mut self, limits: Backups) -> Result<()> {
+    pub(crate) fn set_backup_limits(&mut self, limits: BackupLimits) -> Result<()> {
         self.set_config(ConfigKey::Backups, &limits).map(|_| ())
     }
 }
diff --git a/rslib/src/preferences.rs b/rslib/src/preferences.rs
index be6bf75cf..2a7572350 100644
--- a/rslib/src/preferences.rs
+++ b/rslib/src/preferences.rs
@@ -19,7 +19,7 @@ impl Collection {
             scheduling: Some(self.get_scheduling_preferences()?),
             reviewing: Some(self.get_reviewing_preferences()?),
             editing: Some(self.get_editing_preferences()?),
-            backups: Some(self.get_backups()),
+            backups: Some(self.get_backup_limits()),
         })
     }
@@ -40,7 +40,7 @@ impl Collection {
             self.set_editing_preferences(editing)?;
         }
         if let Some(backups) = prefs.backups {
-            self.set_backups(backups)?;
+            self.set_backup_limits(backups)?;
         }
         Ok(())
     }
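The change detection added in timestamps.rs keys off the collection's modification stamp rather than file contents: record the stamp when a backup is taken, and only back up again once it moves. In outline (illustrative names only):

```python
# Outline of changed_since_last_backup()/update_last_backup_timestamp().
from __future__ import annotations

class BackupStamp:
    def __init__(self) -> None:
        self.last_backup_modified: int | None = None  # millis; None = never

    def changed_since_last_backup(self, collection_change: int) -> bool:
        # None (no backup taken yet) always counts as changed.
        return self.last_backup_modified != collection_change

    def update_last_backup_timestamp(self, collection_change: int) -> None:
        self.last_backup_modified = collection_change
```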
diff --git a/rslib/src/storage/sqlite.rs b/rslib/src/storage/sqlite.rs
index 330c4a871..58e3c2cfe 100644
--- a/rslib/src/storage/sqlite.rs
+++ b/rslib/src/storage/sqlite.rs
@@ -274,6 +274,29 @@ impl SqliteStorage {
         Ok(())
     }
 
+    /// Flush data from WAL file into DB, so the DB is safe to copy. Caller must not call this
+    /// while there is an active transaction.
+    pub(crate) fn checkpoint(&self) -> Result<()> {
+        if !self.db.is_autocommit() {
+            return Err(AnkiError::db_error(
+                "active transaction",
+                DbErrorKind::Other,
+            ));
+        }
+        self.db
+            .query_row_and_then("pragma wal_checkpoint(truncate)", [], |row| {
+                let error_code: i64 = row.get(0)?;
+                if error_code != 0 {
+                    Err(AnkiError::db_error(
+                        "unable to checkpoint",
+                        DbErrorKind::Other,
+                    ))
+                } else {
+                    Ok(())
+                }
+            })
+    }
+
     // Standard transaction start/stop
     ////////////////////////////////////////
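For reference, the pragma used by the new helper is observable from Python's sqlite3 module as well: `wal_checkpoint(truncate)` returns a `(busy, log_pages, checkpointed_pages)` row, and a non-zero `busy` value means the checkpoint could not complete — the condition the Rust code converts into a DbError. A minimal demonstration (hypothetical file name):

```python
# Minimal demonstration of the WAL checkpoint performed above.
import sqlite3

db = sqlite3.connect("collection.anki2")  # hypothetical path
db.execute("pragma journal_mode = wal")
busy, log_pages, ckpt_pages = db.execute(
    "pragma wal_checkpoint(truncate)"
).fetchone()
if busy != 0:
    raise RuntimeError("unable to checkpoint")
```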