Mirror of https://github.com/ankitects/anki.git, synced 2025-09-19 14:32:22 -04:00
change sync label to indicate sync state
- blue for normal sync, red for full sync required
- refactor status fetching code so we don't hold a collection lock during the network request, which slows things down
- fix sync spinner restarting when returning to deck list
parent ac219ae728
commit 4d7e23111e
14 changed files with 261 additions and 71 deletions
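The commit message above describes the new flow: check for local changes first, fall back to a recently cached server answer, and only then do a network round-trip, with the toolbar label coloured from the result. The sketch below is a minimal, self-contained Python illustration of that order, not Anki's actual API; SyncStatusChecker, sync_label_class and the callables passed to them are hypothetical, while the 300-second cache window, the Required values and the CSS class names are taken from the diff that follows.

import time
from enum import IntEnum
from typing import Callable, Optional


class Required(IntEnum):
    # mirrors SyncStatusOut.Required in the proto change below
    NO_CHANGES = 0
    NORMAL_SYNC = 1
    FULL_SYNC = 2


class SyncStatusChecker:
    """Hypothetical helper showing the check order; not part of Anki."""

    CACHE_SECS = 300  # the diff caches the server reply for 5 minutes

    def __init__(
        self,
        local_status: Callable[[], Required],
        remote_status: Callable[[], Required],
    ) -> None:
        self._local_status = local_status
        self._remote_status = remote_status  # the slow network round-trip
        self._cached: Optional[Required] = None
        self._cached_at = 0.0

    def status(self) -> Required:
        # 1. any local change already decides the answer without the network
        local = self._local_status()
        if local != Required.NO_CHANGES:
            return local
        # 2. reuse a recent server reply instead of asking again
        if self._cached is not None and time.time() - self._cached_at < self.CACHE_SECS:
            return self._cached
        # 3. otherwise ask the server and remember the reply
        self._cached = self._remote_status()
        self._cached_at = time.time()
        return self._cached


def sync_label_class(required: Required) -> str:
    # same mapping as updateSyncColor() in qt/ts/src/toolbar.ts:
    # nothing to do -> default colour, normal sync -> blue, full sync -> red
    return {
        Required.NO_CHANGES: "",
        Required.NORMAL_SYNC: "normal-sync",
        Required.FULL_SYNC: "full-sync",
    }[required]


if __name__ == "__main__":
    checker = SyncStatusChecker(
        local_status=lambda: Required.NO_CHANGES,
        remote_status=lambda: Required.NORMAL_SYNC,
    )
    print(sync_label_class(checker.status()))  # prints "normal-sync"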
@@ -164,7 +164,7 @@ service BackendService {
     rpc AbortMediaSync (Empty) returns (Empty);
     rpc BeforeUpload (Empty) returns (Empty);
     rpc SyncLogin (SyncLoginIn) returns (SyncAuth);
-    rpc SyncStatus (SyncAuth) returns (SyncCollectionOut);
+    rpc SyncStatus (SyncAuth) returns (SyncStatusOut);
     rpc SyncCollection (SyncAuth) returns (SyncCollectionOut);
     rpc FullUpload (SyncAuth) returns (Empty);
     rpc FullDownload (SyncAuth) returns (Empty);
@@ -904,6 +904,15 @@ message SyncLoginIn {
     string password = 2;
 }

+message SyncStatusOut {
+    enum Required {
+        NO_CHANGES = 0;
+        NORMAL_SYNC = 1;
+        FULL_SYNC = 2;
+    }
+    Required required = 1;
+}
+
 message SyncCollectionOut {
     enum ChangesRequired {
         NO_CHANGES = 0;
@@ -54,6 +54,7 @@ DeckTreeNode = pb.DeckTreeNode
 StockNoteType = pb.StockNoteType
 SyncAuth = pb.SyncAuth
 SyncOutput = pb.SyncCollectionOut
+SyncStatus = pb.SyncStatusOut

 try:
     import orjson
@@ -107,7 +107,10 @@ def render_method(method, idx):
     input_args = f"self, input: {fullname(method.input_type.full_name)}"
     input_assign_outer = ""
     name = fix_snakecase(stringcase.snakecase(method.name))
-    if len(method.output_type.fields) == 1:
+    if (
+        len(method.output_type.fields) == 1
+        and method.output_type.fields[0].type != TYPE_ENUM
+    ):
         # unwrap single return arg
         f = method.output_type.fields[0]
         single_field = f".{f.name}"
1 qt/.gitignore (vendored)
@@ -19,6 +19,7 @@ aqt_data/web/overview.js
 aqt_data/web/reviewer-bottom.js
 aqt_data/web/reviewer.js
 aqt_data/web/webview.js
+aqt_data/web/toolbar.js
 dist
 aqt.egg-info
 build
@@ -15,6 +15,7 @@ from anki.utils import ids2str
 from aqt import AnkiQt, gui_hooks
 from aqt.qt import *
 from aqt.sound import av_player
+from aqt.sync import get_sync_status
 from aqt.toolbar import BottomBar
 from aqt.utils import askUser, getOnlyText, openLink, shortcut, showWarning, tr
@@ -57,7 +58,7 @@ class DeckBrowser:
         self.web.set_bridge_command(self._linkHandler, self)
         self._renderPage()
         # redraw top bar for theme change
-        self.mw.toolbar.draw()
+        self.mw.toolbar.redraw()

     def refresh(self):
         self._renderPage()
@@ -78,6 +78,7 @@ class ProfileManager:
     def __init__(self, base=None):
         self.name = None
         self.db = None
+        self.profile: Optional[Dict] = None
         # instantiate base folder
         self._setBaseFolder(base)
@@ -16,6 +16,7 @@ from anki.rsbackend import (
     SyncError,
     SyncErrorKind,
     SyncOutput,
+    SyncStatus,
 )
 from aqt.qt import (
     QDialog,
@@ -37,13 +38,19 @@ class FullSyncChoice(enum.Enum):
     DOWNLOAD = 2


-def get_sync_status(mw: aqt.main.AnkiQt, callback: Callable[[SyncOutput], None]):
+def get_sync_status(mw: aqt.main.AnkiQt, callback: Callable[[SyncStatus], None]):
     auth = mw.pm.sync_auth()
     if not auth:
-        return
+        return SyncStatus(required=SyncStatus.NO_CHANGES)

     def on_future_done(fut):
-        callback(fut.result())
+        try:
+            out = fut.result()
+        except Exception as e:
+            # swallow errors
+            print("sync status check failed:", str(e))
+            return
+        callback(out)

     mw.taskman.run_in_background(
         lambda: mw.col.backend.sync_status(auth), on_future_done
@@ -8,8 +8,10 @@ from typing import Any, Dict, Optional

 import aqt
 from anki.lang import _
+from anki.rsbackend import SyncStatus
 from aqt import gui_hooks
 from aqt.qt import *
+from aqt.sync import get_sync_status
 from aqt.webview import AnkiWebView
@@ -45,11 +47,16 @@ class Toolbar:
         link_handler = link_handler or self._linkHandler
         self.web.set_bridge_command(link_handler, web_context)
         self.web.stdHtml(
-            self._body % self._centerLinks(), css=["toolbar.css"], context=web_context,
+            self._body % self._centerLinks(),
+            css=["toolbar.css"],
+            js=["webview.js", "jquery.js", "toolbar.js"],
+            context=web_context,
         )
         self.web.adjustHeightToFit()
-        if self.mw.media_syncer.is_syncing():
-            self.set_sync_active(True)

+    def redraw(self) -> None:
+        self.set_sync_active(self.mw.media_syncer.is_syncing())
+        self.update_sync_status()
+
     # Available links
     ######################################################################
@@ -128,6 +135,9 @@ class Toolbar:

         return "\n".join(links)

+    # Sync
+    ######################################################################
+
     def _create_sync_link(self) -> str:
         name = _("Sync")
         title = _("Shortcut key: %s") % "Y"
@@ -146,6 +156,12 @@ class Toolbar:
             meth = "removeClass"
         self.web.eval(f"$('#sync-spinner').{meth}('spin')")

+    def set_sync_status(self, status: SyncStatus) -> None:
+        self.web.eval(f"updateSyncColor({status.required})")
+
+    def update_sync_status(self) -> None:
+        get_sync_status(self.mw, self.mw.toolbar.set_sync_status)
+
     # Link handling
     ######################################################################
@@ -45,6 +45,14 @@ body {
   #header {
     border-bottom-color: vars.$night-faint-border;
   }
+
+  .normal-sync {
+    color: vars.$night-new-count;
+  }
+
+  .full-sync {
+    color: vars.$night-learn-count;
+  }
 }

 .isMac.nightMode #header {
@@ -71,3 +79,13 @@ body {
   margin-bottom: -3px;
   visibility: hidden;
 }
+
+.normal-sync {
+  color: vars.$day-new-count;
+}
+
+.full-sync {
+  color: vars.$day-learn-count;
+}
22 qt/ts/src/toolbar.ts (new file)
@@ -0,0 +1,22 @@
+enum SyncState {
+    NoChanges = 0,
+    Normal,
+    Full,
+}
+
+function updateSyncColor(state: SyncState) {
+    const elem = document.getElementById("sync");
+    switch (state) {
+        case SyncState.NoChanges:
+            elem.classList.remove("full-sync", "normal-sync");
+            break;
+        case SyncState.Normal:
+            elem.classList.add("normal-sync");
+            elem.classList.remove("full-sync");
+            break;
+        case SyncState.Full:
+            elem.classList.add("full-sync");
+            elem.classList.remove("normal-sync");
+            break;
+    }
+}
@@ -33,8 +33,8 @@ use crate::{
     sched::timespan::{answer_button_time, learning_congrats, studied_today, time_span},
     search::SortMode,
     sync::{
-        sync_abort, sync_login, FullSyncProgress, NormalSyncProgress, SyncActionRequired, SyncAuth,
-        SyncOutput, SyncStage,
+        get_remote_sync_meta, sync_abort, sync_login, FullSyncProgress, NormalSyncProgress,
+        SyncActionRequired, SyncAuth, SyncMeta, SyncOutput, SyncStage,
     },
     template::RenderedNode,
     text::{extract_av_tags, strip_av_tags, AVTag},
@@ -44,7 +44,7 @@ use crate::{
 use fluent::FluentValue;
 use futures::future::{AbortHandle, Abortable};
 use log::error;
-use pb::BackendService;
+use pb::{sync_status_out, BackendService};
 use prost::Message;
 use serde_json::Value as JsonValue;
 use std::collections::{HashMap, HashSet};
@@ -92,6 +92,27 @@ pub struct Backend {
     media_sync_abort: Option<AbortHandle>,
     progress_state: Arc<Mutex<ProgressState>>,
     runtime: Option<Runtime>,
+    state: Arc<Mutex<BackendState>>,
 }

+// fixme: move other items like runtime into here as well
+
+#[derive(Default)]
+struct BackendState {
+    remote_sync_status: RemoteSyncStatus,
+}
+
+#[derive(Default, Debug)]
+pub(crate) struct RemoteSyncStatus {
+    last_check: TimestampSecs,
+    last_response: sync_status_out::Required,
+}
+
+impl RemoteSyncStatus {
+    fn update(&mut self, required: sync_status_out::Required) {
+        self.last_check = TimestampSecs::now();
+        self.last_response = required
+    }
+}
+
 #[derive(Clone, Copy)]
@@ -971,12 +992,12 @@ impl BackendService for Backend {
         self.sync_login_inner(input)
     }

-    fn sync_status(&mut self, input: pb::SyncAuth) -> BackendResult<pb::SyncCollectionOut> {
-        self.sync_collection_inner(input, true)
+    fn sync_status(&mut self, input: pb::SyncAuth) -> BackendResult<pb::SyncStatusOut> {
+        self.sync_status_inner(input)
     }

     fn sync_collection(&mut self, input: pb::SyncAuth) -> BackendResult<pb::SyncCollectionOut> {
-        self.sync_collection_inner(input, false)
+        self.sync_collection_inner(input)
     }

     fn full_upload(&mut self, input: pb::SyncAuth) -> BackendResult<Empty> {
@@ -1157,6 +1178,7 @@ impl Backend {
                 last_progress: None,
             })),
             runtime: None,
+            state: Arc::new(Mutex::new(BackendState::default())),
         }
     }
@@ -1265,10 +1287,38 @@ impl Backend {
         })
     }

+    fn sync_status_inner(&mut self, input: pb::SyncAuth) -> BackendResult<pb::SyncStatusOut> {
+        // any local changes mean we can skip the network round-trip
+        let req = self.with_col(|col| col.get_local_sync_status())?;
+        if req != pb::sync_status_out::Required::NoChanges {
+            return Ok(req.into());
+        }
+
+        // return cached server response if only a short time has elapsed
+        {
+            let guard = self.state.lock().unwrap();
+            if guard.remote_sync_status.last_check.elapsed_secs() < 300 {
+                return Ok(guard.remote_sync_status.last_response.into());
+            }
+        }
+
+        // fetch and cache result
+        let rt = self.runtime_handle();
+        let remote: SyncMeta = rt.block_on(get_remote_sync_meta(input.into()))?;
+        let response = self.with_col(|col| col.get_sync_status(remote).map(Into::into))?;
+
+        {
+            let mut guard = self.state.lock().unwrap();
+            guard.remote_sync_status.last_check = TimestampSecs::now();
+            guard.remote_sync_status.last_response = response;
+        }
+
+        Ok(response.into())
+    }
+
     fn sync_collection_inner(
         &mut self,
         input: pb::SyncAuth,
-        check_only: bool,
     ) -> BackendResult<pb::SyncCollectionOut> {
         let (abort_handle, abort_reg) = AbortHandle::new_pair();
         self.sync_abort = Some(abort_handle);
@@ -1277,11 +1327,6 @@
         let input_copy = input.clone();

         let ret = self.with_col(|col| {
-            let result = if check_only {
-                let sync_fut = col.get_sync_status(input.into());
-                let abortable_sync = Abortable::new(sync_fut, abort_reg);
-                rt.block_on(abortable_sync)
-            } else {
             let mut handler = self.new_progress_handler();
             let progress_fn = move |progress: NormalSyncProgress, throttle: bool| {
                 handler.update(progress, throttle);
@@ -1289,27 +1334,29 @@

             let sync_fut = col.normal_sync(input.into(), progress_fn);
             let abortable_sync = Abortable::new(sync_fut, abort_reg);
-            rt.block_on(abortable_sync)
-            };
-            match result {

+            match rt.block_on(abortable_sync) {
                 Ok(sync_result) => sync_result,
                 Err(_) => {
                     // if the user aborted, we'll need to clean up the transaction
-                    if !check_only {
                     col.storage.rollback_trx()?;
                     // and tell AnkiWeb to clean up
                     let _handle = std::thread::spawn(move || {
-                        let _ =
-                            rt.block_on(sync_abort(input_copy.hkey, input_copy.host_number));
+                        let _ = rt.block_on(sync_abort(input_copy.hkey, input_copy.host_number));
                     });
-                    }

                     Err(AnkiError::Interrupted)
                 }
             }
         });
         self.sync_abort = None;

         let output: SyncOutput = ret?;
+        self.state
+            .lock()
+            .unwrap()
+            .remote_sync_status
+            .update(output.required.into());
         Ok(output.into())
     }
@@ -1361,7 +1408,16 @@ impl Backend {
         )?);

         match result {
-            Ok(sync_result) => sync_result,
+            Ok(sync_result) => {
+                if sync_result.is_ok() {
+                    self.state
+                        .lock()
+                        .unwrap()
+                        .remote_sync_status
+                        .update(sync_status_out::Required::NoChanges);
+                }
+                sync_result
+            }
             Err(_) => Err(AnkiError::Interrupted),
         }
     }
@@ -307,6 +307,15 @@ impl SqliteStorage {
             .map_err(Into::into)
     }

+    pub(crate) fn get_last_sync(&self) -> Result<TimestampMillis> {
+        self.db
+            .prepare_cached("select ls from col")?
+            .query_and_then(NO_PARAMS, |r| r.get(0))?
+            .next()
+            .ok_or_else(|| AnkiError::invalid_input("missing col"))?
+            .map_err(Into::into)
+    }
+
     pub(crate) fn set_last_sync(&self, stamp: TimestampMillis) -> Result<()> {
         self.db
             .prepare("update col set ls = ?")?
@@ -4,6 +4,7 @@
 mod http_client;

 use crate::{
+    backend_proto::{sync_status_out, SyncStatusOut},
     card::{Card, CardQueue, CardType},
     deckconf::DeckConfSchema11,
     decks::DeckSchema11,
@@ -211,7 +212,7 @@ pub struct FullSyncProgress {
     pub total_bytes: usize,
 }

-#[derive(PartialEq, Debug)]
+#[derive(PartialEq, Debug, Clone, Copy)]
 pub enum SyncActionRequired {
     NoChanges,
     FullSyncRequired { upload_ok: bool, download_ok: bool },
@@ -262,6 +263,35 @@ impl Usn {
     }
 }

+impl SyncMeta {
+    fn compared_to_remote(&self, remote: SyncMeta) -> SyncState {
+        let local = self;
+        let required = if remote.modified == local.modified {
+            SyncActionRequired::NoChanges
+        } else if remote.schema != local.schema {
+            let upload_ok = !local.empty || remote.empty;
+            let download_ok = !remote.empty || local.empty;
+            SyncActionRequired::FullSyncRequired {
+                upload_ok,
+                download_ok,
+            }
+        } else {
+            SyncActionRequired::NormalSyncRequired
+        };
+
+        SyncState {
+            required,
+            local_is_newer: local.modified > remote.modified,
+            usn_at_last_sync: local.usn,
+            latest_usn: remote.usn,
+            pending_usn: Usn(-1),
+            new_usn: Some(remote.usn),
+            server_message: remote.server_message,
+            host_number: remote.host_number,
+        }
+    }
+}
+
 impl<F> NormalSyncer<'_, F>
 where
     F: FnMut(NormalSyncProgress, bool),
@@ -338,29 +368,7 @@ where
             });
         }

-        let required = if remote.modified == local.modified {
-            SyncActionRequired::NoChanges
-        } else if remote.schema != local.schema {
-            let upload_ok = !local.empty || remote.empty;
-            let download_ok = !remote.empty || local.empty;
-            SyncActionRequired::FullSyncRequired {
-                upload_ok,
-                download_ok,
-            }
-        } else {
-            SyncActionRequired::NormalSyncRequired
-        };
-
-        Ok(SyncState {
-            required,
-            local_is_newer: local.modified > remote.modified,
-            usn_at_last_sync: local.usn,
-            latest_usn: remote.usn,
-            pending_usn: Usn(-1),
-            new_usn: Some(remote.usn),
-            server_message: remote.server_message,
-            host_number: remote.host_number,
-        })
+        Ok(local.compared_to_remote(remote))
     }

     /// Sync. Caller must have created a transaction, and should call
@@ -593,12 +601,29 @@ pub async fn sync_abort(hkey: String, host_number: u32) -> Result<()> {
     remote.abort().await
 }

+pub(crate) async fn get_remote_sync_meta(auth: SyncAuth) -> Result<SyncMeta> {
+    let remote = HTTPSyncClient::new(Some(auth.hkey), auth.host_number);
+    remote.meta().await
+}
+
 impl Collection {
-    pub async fn get_sync_status(&mut self, auth: SyncAuth) -> Result<SyncOutput> {
-        NormalSyncer::new(self, auth, |_p, _t| ())
-            .get_sync_state()
-            .await
-            .map(Into::into)
+    pub fn get_local_sync_status(&mut self) -> Result<sync_status_out::Required> {
+        let last_sync = self.storage.get_last_sync()?;
+        let schema_mod = self.storage.get_schema_mtime()?;
+        let normal_mod = self.storage.get_modified_time()?;
+        let required = if schema_mod > last_sync {
+            sync_status_out::Required::FullSync
+        } else if normal_mod > last_sync {
+            sync_status_out::Required::NormalSync
+        } else {
+            sync_status_out::Required::NoChanges
+        };
+
+        Ok(required)
+    }
+
+    pub fn get_sync_status(&self, remote: SyncMeta) -> Result<sync_status_out::Required> {
+        Ok(self.sync_meta()?.compared_to_remote(remote).required.into())
     }

     pub async fn normal_sync<F>(&mut self, auth: SyncAuth, progress_fn: F) -> Result<SyncOutput>
@@ -1130,6 +1155,22 @@ impl From<SyncState> for SyncOutput {
     }
 }

+impl From<sync_status_out::Required> for SyncStatusOut {
+    fn from(r: sync_status_out::Required) -> Self {
+        SyncStatusOut { required: r.into() }
+    }
+}
+
+impl From<SyncActionRequired> for sync_status_out::Required {
+    fn from(r: SyncActionRequired) -> Self {
+        match r {
+            SyncActionRequired::NoChanges => sync_status_out::Required::NoChanges,
+            SyncActionRequired::FullSyncRequired { .. } => sync_status_out::Required::FullSync,
+            SyncActionRequired::NormalSyncRequired => sync_status_out::Required::NormalSync,
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
@@ -1246,8 +1287,9 @@ mod test {
         // col1.storage.set_creation_stamp(TimestampSecs(12345))?;

         // and sync our changes
-        let out: SyncOutput = col1.get_sync_status(ctx.auth.clone()).await?;
-        assert_eq!(out.required, SyncActionRequired::NormalSyncRequired);
+        let remote = get_remote_sync_meta(ctx.auth.clone()).await?;
+        let out = col1.get_sync_status(remote)?;
+        assert_eq!(out, sync_status_out::Required::NormalSync);

         let out: SyncOutput = col1.normal_sync(ctx.auth.clone(), norm_progress).await?;
         assert_eq!(out.required, SyncActionRequired::NoChanges);
@@ -13,6 +13,10 @@ impl TimestampSecs {
     pub fn now() -> Self {
         Self(elapsed().as_secs() as i64)
     }
+
+    pub fn elapsed_secs(self) -> u64 {
+        (Self::now().0 - self.0).max(0) as u64
+    }
 }

 impl TimestampMillis {