more syncing work

no checks yet
Damien Elmes 2020-05-28 19:49:44 +10:00
parent 529e89f48e
commit f10d0ee0cb
23 changed files with 1132 additions and 112 deletions


@@ -159,6 +159,8 @@ service BackendService {
rpc SyncMedia (SyncMediaIn) returns (Empty);
rpc AbortMediaSync (Empty) returns (Empty);
rpc BeforeUpload (Empty) returns (Empty);
rpc SyncLogin (SyncLoginIn) returns (SyncLoginOut);
rpc SyncCollection (SyncCollectionIn) returns (SyncCollectionOut);
// translation/messages
@@ -169,7 +171,6 @@ service BackendService {
rpc RegisterTags (RegisterTagsIn) returns (Bool);
rpc AllTags (Empty) returns (AllTagsOut);
rpc GetChangedTags (Int32) returns (GetChangedTagsOut);
// config/preferences
@@ -471,6 +472,8 @@ message SyncError {
SERVER_MESSAGE = 5;
MEDIA_CHECK_REQUIRED = 6;
RESYNC_REQUIRED = 7;
CLOCK_INCORRECT = 8;
DATABASE_CHECK_REQUIRED = 9;
}
SyncErrorKind kind = 1;
}
@@ -875,3 +878,37 @@ message NoteIsDuplicateOrEmptyOut {
}
State state = 1;
}
message SyncLoginIn {
string username = 1;
string password = 2;
}
message SyncLoginOut {
string hkey = 1;
}
message SyncCollectionIn {
enum SyncAction {
CHECK_ONLY = 0;
NORMAL_SYNC = 1;
UPLOAD = 2;
DOWNLOAD = 3;
}
string hkey = 1;
uint32 host_number = 2;
SyncAction action = 3;
}
message SyncCollectionOut {
enum ChangesRequired {
NO_CHANGES = 0;
NORMAL_SYNC = 1;
FULL_SYNC = 2;
}
uint32 host_number = 1;
string server_message = 2;
ChangesRequired required = 3;
}
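
The three ChangesRequired values drive what the client does after a SyncCollection call. A standalone Rust sketch of that dispatch (illustrative only; the real types are generated from this file by prost):

#[derive(Debug, Clone, Copy, PartialEq)]
enum ChangesRequired {
    NoChanges = 0,
    NormalSync = 1,
    FullSync = 2,
}

fn follow_up(required: ChangesRequired) -> &'static str {
    match required {
        ChangesRequired::NoChanges => "collection already up to date",
        ChangesRequired::NormalSync => "run an incremental sync",
        ChangesRequired::FullSync => "full upload or download needed",
    }
}

fn main() {
    assert_eq!(
        follow_up(ChangesRequired::FullSync),
        "full upload or download needed"
    );
}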


@@ -121,7 +121,9 @@ impl std::convert::From<SyncErrorKind> for i32 {
SyncErrorKind::AuthFailed => V::AuthFailed,
SyncErrorKind::ServerMessage => V::ServerMessage,
SyncErrorKind::ResyncRequired => V::ResyncRequired,
SyncErrorKind::DatabaseCheckRequired => V::DatabaseCheckRequired,
SyncErrorKind::Other => V::Other,
SyncErrorKind::ClockIncorrect => V::ClockIncorrect,
}) as i32
}
}
@@ -922,6 +924,17 @@ impl BackendService for Backend {
// sync
//-------------------------------------------------------------------
fn sync_login(&mut self, input: pb::SyncLoginIn) -> BackendResult<pb::SyncLoginOut> {
todo!()
}
fn sync_collection(
&mut self,
input: pb::SyncCollectionIn,
) -> BackendResult<pb::SyncCollectionOut> {
todo!()
}
fn sync_media(&mut self, input: SyncMediaIn) -> BackendResult<Empty> {
let mut guard = self.col.lock().unwrap();
@@ -1013,16 +1026,6 @@ impl BackendService for Backend {
})
}
fn get_changed_tags(&mut self, input: pb::Int32) -> BackendResult<pb::GetChangedTagsOut> {
self.with_col(|col| {
col.transact(None, |col| {
Ok(pb::GetChangedTagsOut {
tags: col.storage.get_changed_tags(Usn(input.val))?,
})
})
})
}
// config/preferences
//-------------------------------------------------------------------
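
The two new rpcs above are deliberately left as stubs in this commit ("no checks yet"). For orientation, sync_login will sit on top of HTTPSyncClient::login below, which in the legacy protocol talks to AnkiWeb's hostKey endpoint; its JSON bodies are small objects along these lines (a sketch of the legacy wire format, with the "u"/"p"/"key" field names used by older clients):

use serde::{Deserialize, Serialize};

#[derive(Serialize)]
struct HostKeyIn<'a> {
    u: &'a str, // username
    p: &'a str, // password
}

#[derive(Deserialize, Debug)]
struct HostKeyOut {
    key: String, // the hkey the client stores and sends on later requests
}

fn main() -> serde_json::Result<()> {
    let body = serde_json::to_string(&HostKeyIn { u: "user", p: "pass" })?;
    assert_eq!(body, r#"{"u":"user","p":"pass"}"#);
    let out: HostKeyOut = serde_json::from_str(r#"{"key":"abc123"}"#)?;
    assert_eq!(out.key, "abc123");
    Ok(())
}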


@@ -86,6 +86,9 @@ impl AnkiError {
SyncErrorKind::ClientTooOld => i18n.tr(TR::SyncClientTooOld),
SyncErrorKind::AuthFailed => i18n.tr(TR::SyncWrongPass),
SyncErrorKind::ResyncRequired => i18n.tr(TR::SyncResyncRequired),
// fixme: i18n
SyncErrorKind::ClockIncorrect => "Please check your clock.".into(),
SyncErrorKind::DatabaseCheckRequired => "Please check the database.".into(),
}
.into(),
AnkiError::NetworkError { kind, info } => {
@@ -186,8 +189,10 @@ pub enum SyncErrorKind {
ClientTooOld,
AuthFailed,
ServerMessage,
ClockIncorrect,
Other,
ResyncRequired,
DatabaseCheckRequired,
}
fn error_for_status_code(info: String, code: StatusCode) -> AnkiError {


@@ -0,0 +1,42 @@
insert
or replace into cards (
id,
nid,
did,
ord,
mod,
usn,
type,
queue,
due,
ivl,
factor,
reps,
lapses,
left,
odue,
odid,
flags,
data
)
values
(
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?
)


@@ -6,6 +6,7 @@ use crate::{
decks::DeckID,
err::Result,
notes::NoteID,
sync::CardEntry,
timestamp::{TimestampMillis, TimestampSecs},
types::Usn,
};
@@ -120,6 +121,34 @@ impl super::SqliteStorage {
Ok(())
}
/// Add or update card, using the provided ID. Used when syncing.
pub(crate) fn add_or_update_card(&self, card: &Card) -> Result<()> {
let mut stmt = self.db.prepare_cached(include_str!("add_or_update.sql"))?;
stmt.execute(params![
card.id,
card.nid,
card.did,
card.ord,
card.mtime,
card.usn,
card.ctype as u8,
card.queue as i8,
card.due,
card.ivl,
card.factor,
card.reps,
card.lapses,
card.left,
card.odue,
card.odid,
card.flags,
card.data,
])?;
Ok(())
}
pub(crate) fn remove_card(&self, cid: CardID) -> Result<()> {
self.db
.prepare_cached("delete from cards where id = ?")?
@@ -191,6 +220,35 @@ impl super::SqliteStorage {
.execute(NO_PARAMS)?;
Ok(())
}
    pub(crate) fn take_cards_pending_sync(
        &self,
        new_usn: Usn,
        limit: usize,
    ) -> Result<Vec<CardEntry>> {
        if limit == 0 {
            return Ok(vec![]);
        }
        let entries: Vec<CardEntry> = self
            .db
            .prepare_cached(concat!(
                include_str!("get_card.sql"),
                " where usn=-1 limit ?"
            ))?
            .query_and_then(&[limit as u32], |r| {
                row_to_card(r).map(Into::into).map_err(Into::into)
            })?
            .collect::<Result<_>>()?;
        // mark only the rows we actually took as synced, so the remaining
        // usn=-1 entries are picked up by the next chunk
        let mut update = self
            .db
            .prepare_cached("update cards set usn=? where id=?")?;
        for entry in &entries {
            update.execute(params![new_usn, entry.id])?;
        }
        Ok(entries)
    }
}
#[cfg(test)]
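
All the take_*_pending_sync helpers added in this commit share one convention: rows modified locally carry usn=-1, and taking a batch stamps it with the server-assigned USN so it is not sent again. A self-contained sketch of the scheme against a toy table (not Anki's real schema):

use rusqlite::{params, Connection, Result, NO_PARAMS};

fn take_pending(db: &Connection, new_usn: i32, limit: usize) -> Result<Vec<i64>> {
    let ids: Vec<i64> = db
        .prepare("select id from objs where usn=-1 limit ?")?
        .query_map(params![limit as u32], |r| r.get(0))?
        .collect::<Result<_>>()?;
    // stamp only the rows we took; the rest stay pending
    let mut update = db.prepare("update objs set usn=? where id=?")?;
    for id in &ids {
        update.execute(params![new_usn, id])?;
    }
    Ok(ids)
}

fn main() -> Result<()> {
    let db = Connection::open_in_memory()?;
    db.execute_batch("create table objs (id integer primary key, usn integer);")?;
    for _ in 0..5 {
        db.execute("insert into objs (usn) values (-1)", NO_PARAMS)?;
    }
    assert_eq!(take_pending(&db, 7, 3)?.len(), 3);
    assert_eq!(take_pending(&db, 7, 3)?.len(), 2);
    Ok(())
}

Updating by id rather than with a blanket "where usn=-1" matters once the select is limited: rows that were not taken must remain pending for the next chunk.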


@@ -64,6 +64,13 @@ impl SqliteStorage {
Ok(())
}
pub(crate) fn clear_config_usns(&self) -> Result<()> {
self.db
.prepare("update config set usn = 0 where usn != 0")?
.execute(NO_PARAMS)?;
Ok(())
}
// Upgrading/downgrading
pub(super) fn upgrade_config_to_schema14(&self) -> Result<()> {


@@ -9,6 +9,7 @@ use crate::{
decks::{Deck, DeckCommon, DeckID, DeckKindProto, DeckSchema11, DueCounts},
err::{AnkiError, DBErrorKind, Result},
i18n::{I18n, TR},
prelude::*,
timestamp::TimestampMillis,
};
use prost::Message;
@@ -101,6 +102,11 @@ impl SqliteStorage {
// fixme: bail instead of assert
pub(crate) fn update_deck(&self, deck: &Deck) -> Result<()> {
assert!(deck.id.0 != 0);
self.add_or_update_deck(deck)
}
/// Used for syncing; will keep existing ID.
pub(crate) fn add_or_update_deck(&self, deck: &Deck) -> Result<()> {
let mut stmt = self.db.prepare_cached(include_str!("update_deck.sql"))?;
let mut common = vec![];
deck.common.encode(&mut common)?;
@@ -227,6 +233,19 @@ impl SqliteStorage {
Ok(())
}
pub(crate) fn take_deck_ids_pending_sync(&self, new_usn: Usn) -> Result<Vec<DeckID>> {
let ids: Vec<DeckID> = self
.db
.prepare("select id from decks where usn=-1")?
.query_and_then(NO_PARAMS, |r| r.get(0))?
.collect::<std::result::Result<_, rusqlite::Error>>()?;
self.db
.prepare("update decks set usn=? where usn=-1")?
.execute(&[new_usn])?;
Ok(ids)
}
// Upgrading/downgrading/legacy
pub(super) fn add_default_deck(&self, i18n: &I18n) -> Result<()> {


@@ -0,0 +1,4 @@
insert
or replace into deck_config (id, name, mtime_secs, usn, config)
values
(?, ?, ?, ?, ?);


@@ -6,6 +6,7 @@ use crate::{
deckconf::{DeckConf, DeckConfID, DeckConfSchema11, DeckConfigInner},
err::Result,
i18n::{I18n, TR},
prelude::*,
};
use prost::Message;
use rusqlite::{params, Row, NO_PARAMS};
@@ -72,6 +73,22 @@ impl SqliteStorage {
Ok(())
}
/// Used for syncing.
pub(crate) fn add_or_update_deck_config(&self, conf: &DeckConf) -> Result<()> {
let mut conf_bytes = vec![];
conf.inner.encode(&mut conf_bytes)?;
self.db
.prepare_cached(include_str!("add_or_update.sql"))?
.execute(params![
conf.id,
conf.name,
conf.mtime_secs,
conf.usn,
conf_bytes,
])?;
Ok(())
}
pub(crate) fn remove_deck_conf(&self, dcid: DeckConfID) -> Result<()> {
self.db
.prepare_cached("delete from deck_config where id=?")?
@@ -86,6 +103,22 @@ impl SqliteStorage {
Ok(())
}
pub(crate) fn take_deck_config_ids_pending_sync(
&self,
new_usn: Usn,
) -> Result<Vec<DeckConfID>> {
let ids: Vec<DeckConfID> = self
.db
.prepare("select id from deck_config where usn=-1")?
.query_and_then(NO_PARAMS, |r| r.get(0))?
.collect::<std::result::Result<_, rusqlite::Error>>()?;
self.db
.prepare("update deck_config set usn=? where usn=-1")?
.execute(&[new_usn])?;
Ok(ids)
}
// Creating/upgrading/downgrading
pub(super) fn add_default_deck_config(&self, i18n: &I18n) -> Result<()> {


@@ -2,9 +2,20 @@
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
use super::SqliteStorage;
use crate::{card::CardID, decks::DeckID, err::Result, notes::NoteID, types::Usn};
use crate::{
card::CardID,
decks::DeckID,
err::{AnkiError, Result},
notes::NoteID,
sync::Graves,
types::Usn,
};
use num_enum::TryFromPrimitive;
use rusqlite::{params, NO_PARAMS};
use std::convert::TryFrom;
#[derive(TryFromPrimitive)]
#[repr(u8)]
enum GraveKind {
Card,
Note,
@@ -35,4 +46,28 @@ impl SqliteStorage {
pub(crate) fn add_deck_grave(&self, did: DeckID, usn: Usn) -> Result<()> {
self.add_grave(did.0, GraveKind::Deck, usn)
}
pub(crate) fn take_pending_graves(&self, new_usn: Usn) -> Result<Graves> {
let mut stmt = self
.db
.prepare("select oid, type from graves where usn=-1")?;
let mut rows = stmt.query(NO_PARAMS)?;
let mut graves = Graves::default();
while let Some(row) = rows.next()? {
let oid: i64 = row.get(0)?;
let kind = GraveKind::try_from(row.get::<_, u8>(1)?)
.map_err(|_| AnkiError::invalid_input("invalid grave kind"))?;
match kind {
GraveKind::Card => graves.cards.push(CardID(oid)),
GraveKind::Note => graves.notes.push(NoteID(oid)),
GraveKind::Deck => graves.decks.push(DeckID(oid)),
}
}
self.db
.prepare("update graves set usn=? where usn=-1")?
.execute(&[new_usn])?;
Ok(graves)
}
}
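
GraveKind's TryFromPrimitive derive (from the num_enum crate) is what makes the row.get::<_, u8>(1) decoding above safe: an unexpected integer in graves.type surfaces as an error instead of an invalid enum value. A standalone mirror:

use num_enum::TryFromPrimitive;
use std::convert::TryFrom;

#[derive(Debug, PartialEq, TryFromPrimitive)]
#[repr(u8)]
enum GraveKind {
    Card,
    Note,
    Deck,
}

fn main() {
    assert_eq!(GraveKind::try_from(1u8).unwrap(), GraveKind::Note);
    assert!(GraveKind::try_from(9u8).is_err()); // would map to "invalid grave kind"
}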


@@ -0,0 +1,28 @@
insert
or replace into notes (
id,
guid,
mid,
mod,
usn,
tags,
flds,
sfld,
csum,
flags,
data
)
values
(
?,
?,
?,
?,
?,
?,
?,
?,
?,
0,
""
)


@@ -1,10 +1,9 @@
 select
+  id,
   guid,
   mid,
   mod,
   usn,
   tags,
   flds
-from notes
-where
-  id = ?
+from notes


@@ -5,39 +5,44 @@ use crate::{
err::Result,
notes::{Note, NoteID},
notetype::NoteTypeID,
prelude::*,
sync::NoteEntry,
tags::{join_tags, split_tags},
timestamp::TimestampMillis,
};
use rusqlite::{params, OptionalExtension, NO_PARAMS};
use rusqlite::{params, Row, NO_PARAMS};
fn split_fields(fields: &str) -> Vec<String> {
pub(crate) fn split_fields(fields: &str) -> Vec<String> {
fields.split('\x1f').map(Into::into).collect()
}
fn join_fields(fields: &[String]) -> String {
pub(crate) fn join_fields(fields: &[String]) -> String {
fields.join("\x1f")
}
fn row_to_note(row: &Row) -> Result<Note> {
Ok(Note {
id: row.get(0)?,
guid: row.get(1)?,
ntid: row.get(2)?,
mtime: row.get(3)?,
usn: row.get(4)?,
tags: split_tags(row.get_raw(5).as_str()?)
.map(Into::into)
.collect(),
fields: split_fields(row.get_raw(6).as_str()?),
sort_field: None,
checksum: None,
})
}
impl super::SqliteStorage {
pub fn get_note(&self, nid: NoteID) -> Result<Option<Note>> {
let mut stmt = self.db.prepare_cached(include_str!("get.sql"))?;
stmt.query_row(params![nid], |row| {
Ok(Note {
id: nid,
guid: row.get(0)?,
ntid: row.get(1)?,
mtime: row.get(2)?,
usn: row.get(3)?,
tags: split_tags(row.get_raw(4).as_str()?)
.map(Into::into)
.collect(),
fields: split_fields(row.get_raw(5).as_str()?),
sort_field: None,
checksum: None,
})
})
.optional()
.map_err(Into::into)
self.db
.prepare_cached(concat!(include_str!("get.sql"), " where id = ?"))?
.query_and_then(params![nid], row_to_note)?
.next()
.transpose()
}
/// Caller must call note.prepare_for_update() prior to calling this.
@@ -76,6 +81,24 @@ impl super::SqliteStorage {
Ok(())
}
/// Add or update the provided note, preserving ID. Used by the syncing code.
pub(crate) fn add_or_update_note(&self, note: &Note) -> Result<()> {
let mut stmt = self.db.prepare_cached(include_str!("add_or_update.sql"))?;
stmt.execute(params![
note.id,
note.guid,
note.ntid,
note.mtime,
note.usn,
join_tags(&note.tags),
join_fields(&note.fields()),
note.sort_field.as_ref().unwrap(),
note.checksum.unwrap(),
])?;
Ok(())
}
pub(crate) fn remove_note(&self, nid: NoteID) -> Result<()> {
self.db
.prepare_cached("delete from notes where id = ?")?
@@ -110,4 +133,28 @@ impl super::SqliteStorage {
.query_and_then(params![csum, ntid, nid], |r| r.get(0).map_err(Into::into))?
.collect()
}
    pub(crate) fn take_notes_pending_sync(
        &self,
        new_usn: Usn,
        limit: usize,
    ) -> Result<Vec<NoteEntry>> {
        if limit == 0 {
            return Ok(vec![]);
        }
        let entries: Vec<NoteEntry> = self
            .db
            .prepare_cached(concat!(include_str!("get.sql"), " where usn=-1 limit ?"))?
            .query_and_then(&[limit as u32], |r| row_to_note(r).map(Into::into))?
            .collect::<Result<_>>()?;
        // mark only the rows we actually took as synced, so the remaining
        // usn=-1 entries are picked up by the next chunk
        let mut update = self
            .db
            .prepare_cached("update notes set usn=? where id=?")?;
        for entry in &entries {
            update.execute(params![new_usn, entry.id])?;
        }
        Ok(entries)
    }
}
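
split_fields/join_fields are now pub(crate) because the sync code needs the same field encoding: all of a note's fields live in the single flds column, joined on the ASCII unit separator 0x1f, which Anki keeps out of field text. A two-assert demonstration:

fn main() {
    let fields = vec!["front".to_string(), "back".to_string()];
    let joined = fields.join("\x1f"); // what join_fields produces
    assert_eq!(joined, "front\u{1f}back");
    assert_eq!(joined.split('\x1f').collect::<Vec<_>>(), vec!["front", "back"]);
}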


@@ -0,0 +1,4 @@
insert
or replace into notetypes (id, name, mtime_secs, usn, config)
values
(?, ?, ?, ?, ?);


@@ -10,6 +10,7 @@ use crate::{
NoteTypeConfig,
},
notetype::{NoteType, NoteTypeID, NoteTypeSchema11},
prelude::*,
timestamp::TimestampMillis,
};
use prost::Message;
@@ -225,6 +226,19 @@ impl SqliteStorage {
Ok(())
}
/// Used for syncing.
pub(crate) fn add_or_update_notetype(&self, nt: &NoteType) -> Result<()> {
let mut stmt = self.db.prepare_cached(include_str!("add_or_update.sql"))?;
let mut config_bytes = vec![];
nt.config.encode(&mut config_bytes)?;
stmt.execute(params![nt.id, nt.name, nt.mtime_secs, nt.usn, config_bytes])?;
self.update_notetype_fields(nt.id, &nt.fields)?;
self.update_notetype_templates(nt.id, &nt.templates)?;
Ok(())
}
pub(crate) fn remove_cards_for_deleted_templates(
&self,
ntid: NoteTypeID,
@@ -315,6 +329,19 @@ and ord in ",
Ok(())
}
pub(crate) fn take_notetype_ids_pending_sync(&self, new_usn: Usn) -> Result<Vec<NoteTypeID>> {
let ids: Vec<NoteTypeID> = self
.db
.prepare("select id from notetypes where usn=-1")?
.query_and_then(NO_PARAMS, |r| r.get(0))?
.collect::<std::result::Result<_, rusqlite::Error>>()?;
self.db
.prepare("update notetypes set usn=? where usn=-1")?
.execute(&[new_usn])?;
Ok(ids)
}
// Upgrading/downgrading/legacy
pub(crate) fn get_all_notetypes_as_schema11(


@@ -0,0 +1,14 @@
insert
or ignore into revlog (
id,
cid,
usn,
ease,
ivl,
lastIvl,
factor,
time,
type
)
values
(?, ?, ?, ?, ?, ?, ?, ?, ?)


@@ -0,0 +1,11 @@
select
id,
cid,
usn,
ease,
ivl,
lastIvl,
factor,
time,
type
from revlog


@@ -2,8 +2,9 @@
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
use super::SqliteStorage;
use crate::err::Result;
use rusqlite::NO_PARAMS;
use crate::prelude::*;
use crate::{err::Result, sync::ReviewLogEntry};
use rusqlite::{params, NO_PARAMS};
impl SqliteStorage {
pub(crate) fn fix_revlog_properties(&self) -> Result<usize> {
@@ -19,4 +20,57 @@ impl SqliteStorage {
.execute(NO_PARAMS)?;
Ok(())
}
pub(crate) fn add_revlog_entry(&self, entry: &ReviewLogEntry) -> Result<()> {
self.db
.prepare_cached(include_str!("add.sql"))?
.execute(params![
entry.id,
entry.cid,
entry.usn,
entry.ease,
entry.interval,
entry.last_interval,
entry.factor,
entry.time,
entry.kind
])?;
Ok(())
}
    pub(crate) fn take_revlog_pending_sync(
        &self,
        new_usn: Usn,
        limit: usize,
    ) -> Result<Vec<ReviewLogEntry>> {
        if limit == 0 {
            return Ok(vec![]);
        }
        let entries: Vec<ReviewLogEntry> = self
            .db
            .prepare_cached(concat!(include_str!("get.sql"), " where usn=-1 limit ?"))?
            .query_and_then(&[limit as u32], |row| {
                Ok(ReviewLogEntry {
                    id: row.get(0)?,
                    cid: row.get(1)?,
                    usn: row.get(2)?,
                    ease: row.get(3)?,
                    interval: row.get(4)?,
                    last_interval: row.get(5)?,
                    factor: row.get(6)?,
                    time: row.get(7)?,
                    kind: row.get(8)?,
                })
            })?
            .collect::<Result<_>>()?;
        // mark only the rows we actually took as synced, so the remaining
        // usn=-1 entries are picked up by the next chunk
        let mut update = self
            .db
            .prepare_cached("update revlog set usn=? where id=?")?;
        for entry in &entries {
            update.execute(params![new_usn, entry.id])?;
        }
        Ok(entries)
    }
}
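
Note the asymmetry with cards and notes: revlog's add.sql uses "insert or ignore" rather than "insert or replace", presumably because review history is an append-only log - a row that arrives twice (e.g. from a replayed chunk) should be skipped, not overwritten. A standalone sketch of that conflict behaviour:

use rusqlite::{Connection, Result, NO_PARAMS};

fn main() -> Result<()> {
    let db = Connection::open_in_memory()?;
    db.execute_batch("create table revlog (id integer primary key, ease integer);")?;
    db.execute("insert or ignore into revlog values (1, 3)", NO_PARAMS)?;
    // the same id arriving a second time is silently ignored
    db.execute("insert or ignore into revlog values (1, 4)", NO_PARAMS)?;
    let ease: i64 =
        db.query_row("select ease from revlog where id = 1", NO_PARAMS, |r| r.get(0))?;
    assert_eq!(ease, 3);
    Ok(())
}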


@@ -258,12 +258,25 @@ impl SqliteStorage {
//////////////////////////////////////////
 pub(crate) fn mark_modified(&self) -> Result<()> {
+    self.set_modified_time(TimestampMillis::now())
+}
+
+pub(crate) fn set_modified_time(&self, stamp: TimestampMillis) -> Result<()> {
     self.db
         .prepare_cached("update col set mod=?")?
-        .execute(params![TimestampMillis::now()])?;
+        .execute(params![stamp])?;
     Ok(())
 }
pub(crate) fn get_modified_time(&self) -> Result<TimestampMillis> {
self.db
.prepare_cached("select mod from col")?
.query_and_then(NO_PARAMS, |r| r.get(0))?
.next()
.ok_or_else(|| AnkiError::invalid_input("missing col"))?
.map_err(Into::into)
}
pub(crate) fn usn(&self, server: bool) -> Result<Usn> {
if server {
Ok(Usn(self
@@ -275,6 +288,13 @@ impl SqliteStorage {
}
}
pub(crate) fn set_usn(&self, usn: Usn) -> Result<()> {
self.db
.prepare_cached("update col set usn = ?")?
.execute(&[usn])?;
Ok(())
}
pub(crate) fn increment_usn(&self) -> Result<()> {
self.db
.prepare_cached("update col set usn = usn + 1")?
@@ -303,7 +323,7 @@ impl SqliteStorage {
Ok(())
}
pub(crate) fn get_schema_mtime(&self) -> Result<TimestampSecs> {
pub(crate) fn get_schema_mtime(&self) -> Result<TimestampMillis> {
self.db
.prepare_cached("select scm from col")?
.query_and_then(NO_PARAMS, |r| r.get(0))?
@@ -312,7 +332,7 @@ impl SqliteStorage {
.map_err(Into::into)
}
pub(crate) fn set_last_sync(&self, stamp: TimestampSecs) -> Result<()> {
pub(crate) fn set_last_sync(&self, stamp: TimestampMillis) -> Result<()> {
self.db
.prepare("update col set ls = ?")?
.execute(&[stamp])?;


@@ -45,7 +45,7 @@ impl SqliteStorage {
// fixme: in the future we could just register tags as part of the sync
// instead of sending the tag list separately
pub(crate) fn get_changed_tags(&self, usn: Usn) -> Result<Vec<String>> {
pub(crate) fn take_changed_tags(&self, usn: Usn) -> Result<Vec<String>> {
let tags: Vec<String> = self
.db
.prepare("select tag from tags where usn=-1")?


@@ -6,7 +6,10 @@ use bytes::Bytes;
use futures::Stream;
use reqwest::Body;
// fixme: 100mb limit
static SYNC_VERSION: u8 = 10;
pub struct HTTPSyncClient {
hkey: Option<String>,
skey: String,
@@ -37,15 +40,15 @@ struct MetaIn<'a> {
#[derive(Serialize, Deserialize, Debug)]
struct StartIn {
#[serde(rename = "minUsn")]
minimum_usn: Usn,
local_usn: Usn,
#[serde(rename = "offset")]
minutes_west: i32,
minutes_west: Option<i32>,
// only used to modify behaviour of changes()
#[serde(rename = "lnewer")]
client_is_newer: bool,
local_is_newer: bool,
// used by 2.0 clients
#[serde(skip_serializing_if = "Option::is_none")]
client_graves: Option<Graves>,
local_graves: Option<Graves>,
}
#[derive(Serialize, Deserialize, Debug)]
@@ -55,7 +58,7 @@ struct ApplyGravesIn {
#[derive(Serialize, Deserialize, Debug)]
struct ApplyChangesIn {
changes: Changes,
changes: UnchunkedChanges,
}
#[derive(Serialize, Deserialize, Debug)]
@@ -72,7 +75,7 @@ struct SanityCheckIn {
struct Empty {}
impl HTTPSyncClient {
pub fn new<'a>(endpoint_suffix: &str) -> HTTPSyncClient {
pub fn new<'a>(hkey: Option<String>, endpoint_suffix: &str) -> HTTPSyncClient {
let client = Client::builder()
.connect_timeout(Duration::from_secs(30))
.timeout(Duration::from_secs(60))
@@ -81,7 +84,7 @@ impl HTTPSyncClient {
let skey = guid();
let endpoint = endpoint(&endpoint_suffix);
HTTPSyncClient {
hkey: None,
hkey,
skey,
client,
endpoint,
@@ -151,7 +154,7 @@ impl HTTPSyncClient {
self.hkey.as_ref().unwrap()
}
pub(crate) async fn meta(&self) -> Result<ServerMeta> {
pub(crate) async fn meta(&self) -> Result<SyncMeta> {
let meta_in = MetaIn {
sync_version: SYNC_VERSION,
client_version: sync_client_version(),
@@ -161,15 +164,15 @@ impl HTTPSyncClient {
pub(crate) async fn start(
&self,
minimum_usn: Usn,
minutes_west: i32,
client_is_newer: bool,
local_usn: Usn,
minutes_west: Option<i32>,
local_is_newer: bool,
) -> Result<Graves> {
         let input = StartIn {
-            minimum_usn,
+            local_usn: local_usn,
             minutes_west,
-            client_is_newer,
-            client_graves: None,
+            local_is_newer: local_is_newer,
+            local_graves: None,
         };
self.json_request_deserialized("start", &input).await
}
@@ -181,7 +184,10 @@ impl HTTPSyncClient {
Ok(())
}
pub(crate) async fn apply_changes(&self, changes: Changes) -> Result<Changes> {
pub(crate) async fn apply_changes(
&self,
changes: UnchunkedChanges,
) -> Result<UnchunkedChanges> {
let input = ApplyChangesIn { changes };
self.json_request_deserialized("applyChanges", &input).await
}
@@ -202,10 +208,8 @@ impl HTTPSyncClient {
self.json_request_deserialized("sanityCheck2", &input).await
}
pub(crate) async fn finish(&self) -> Result<()> {
let resp = self.json_request("finish", &Empty {}, false).await?;
resp.error_for_status()?;
Ok(())
pub(crate) async fn finish(&self) -> Result<TimestampMillis> {
Ok(self.json_request_deserialized("finish", &Empty {}).await?)
}
pub(crate) async fn abort(&self) -> Result<()> {
@@ -330,7 +334,7 @@ mod test {
use tokio::runtime::Runtime;
async fn http_client_inner(username: String, password: String) -> Result<()> {
let mut syncer = HTTPSyncClient::new("");
let mut syncer = HTTPSyncClient::new(None, "");
assert!(matches!(
syncer.login("nosuchuser", "nosuchpass").await,
@@ -353,17 +357,17 @@ mod test {
})
));
let _graves = syncer.start(Usn(1), 0, true).await?;
let _graves = syncer.start(Usn(1), None, true).await?;
// aborting should now work
syncer.abort().await?;
// start again, and continue
let _graves = syncer.start(Usn(0), 0, true).await?;
let _graves = syncer.start(Usn(1), None, true).await?;
syncer.apply_graves(Graves::default()).await?;
let _changes = syncer.apply_changes(Changes::default()).await?;
let _changes = syncer.apply_changes(UnchunkedChanges::default()).await?;
let _chunk = syncer.chunk().await?;
syncer
.apply_chunk(Chunk {


@@ -4,17 +4,21 @@
mod http_client;
use crate::{
card::{CardQueue, CardType},
card::{Card, CardQueue, CardType},
deckconf::DeckConfSchema11,
decks::DeckSchema11,
notes::guid,
notetype::NoteTypeSchema11,
err::SyncErrorKind,
notes::{guid, Note},
notetype::{NoteType, NoteTypeSchema11},
prelude::*,
tags::{join_tags, split_tags},
version::sync_client_version,
};
use flate2::write::GzEncoder;
use flate2::Compression;
use futures::StreamExt;
use http_client::HTTPSyncClient;
use itertools::Itertools;
use reqwest::{multipart, Client, Response};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
@@ -27,7 +31,7 @@ use tempfile::NamedTempFile;
pub struct SyncProgress {}
#[derive(Serialize, Deserialize, Debug)]
pub struct ServerMeta {
pub struct SyncMeta {
#[serde(rename = "mod")]
modified: TimestampMillis,
#[serde(rename = "scm")]
@@ -45,9 +49,9 @@ pub struct ServerMeta {
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Graves {
cards: Vec<CardID>,
decks: Vec<DeckID>,
notes: Vec<NoteID>,
pub(crate) cards: Vec<CardID>,
pub(crate) decks: Vec<DeckID>,
pub(crate) notes: Vec<NoteID>,
}
#[derive(Serialize_tuple, Deserialize, Debug, Default)]
@@ -57,7 +61,7 @@ pub struct DecksAndConfig {
}
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Changes {
pub struct UnchunkedChanges {
#[serde(rename = "models")]
notetypes: Vec<NoteTypeSchema11>,
#[serde(rename = "decks")]
@@ -84,57 +88,57 @@ pub struct Chunk {
#[derive(Serialize_tuple, Deserialize, Debug)]
pub struct ReviewLogEntry {
id: TimestampMillis,
cid: CardID,
usn: Usn,
ease: u8,
pub id: TimestampMillis,
pub cid: CardID,
pub usn: Usn,
pub ease: u8,
#[serde(rename = "ivl")]
interval: i32,
pub interval: i32,
#[serde(rename = "lastIvl")]
last_interval: i32,
factor: u32,
time: u32,
pub last_interval: i32,
pub factor: u32,
pub time: u32,
#[serde(rename = "type")]
kind: u8,
pub kind: u8,
}
#[derive(Serialize_tuple, Deserialize, Debug)]
pub struct NoteEntry {
id: NoteID,
guid: String,
pub id: NoteID,
pub guid: String,
#[serde(rename = "mid")]
ntid: NoteTypeID,
pub ntid: NoteTypeID,
#[serde(rename = "mod")]
mtime: TimestampSecs,
usn: Usn,
tags: String,
fields: String,
sfld: String, // always empty
csum: String, // always empty
flags: u32,
data: String,
pub mtime: TimestampSecs,
pub usn: Usn,
pub tags: String,
pub fields: String,
pub sfld: String, // always empty
pub csum: String, // always empty
pub flags: u32,
pub data: String,
}
#[derive(Serialize_tuple, Deserialize, Debug)]
pub struct CardEntry {
id: CardID,
nid: NoteID,
did: DeckID,
ord: u16,
mtime: TimestampSecs,
usn: Usn,
ctype: CardType,
queue: CardQueue,
due: i32,
ivl: u32,
factor: u16,
reps: u32,
lapses: u32,
left: u32,
odue: i32,
odid: DeckID,
flags: u8,
data: String,
pub id: CardID,
pub nid: NoteID,
pub did: DeckID,
pub ord: u16,
pub mtime: TimestampSecs,
pub usn: Usn,
pub ctype: CardType,
pub queue: CardQueue,
pub due: i32,
pub ivl: u32,
pub factor: u16,
pub reps: u32,
pub lapses: u32,
pub left: u32,
pub odue: i32,
pub odid: DeckID,
pub flags: u8,
pub data: String,
}
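
ReviewLogEntry, NoteEntry and CardEntry derive Serialize_tuple (from the serde_tuple crate) because the sync protocol sends these rows as JSON arrays rather than objects. serde's ordinary derived Deserialize already accepts an array for a plain struct, so only the serialize side needs the tuple form. A minimal two-field mirror:

use serde::Deserialize;
use serde_tuple::Serialize_tuple;

#[derive(Serialize_tuple, Deserialize, Debug, PartialEq)]
struct EntryDemo {
    id: i64,
    usn: i32,
}

fn main() -> serde_json::Result<()> {
    let e = EntryDemo { id: 17, usn: -1 };
    assert_eq!(serde_json::to_string(&e)?, "[17,-1]");
    let back: EntryDemo = serde_json::from_str("[17,-1]")?;
    assert_eq!(back, e);
    Ok(())
}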
#[derive(Serialize, Deserialize, Debug)]
@@ -178,3 +182,567 @@ pub struct FullSyncProgress {
transferred_bytes: usize,
total_bytes: usize,
}
pub enum SyncState {
NoChanges,
FullSyncRequired,
NormalSyncRequired(NormalSyncMeta),
}
pub struct NormalSyncMeta {
local_is_newer: bool,
local_usn: Usn,
remote_usn: Usn,
server_message: String,
shard_number: u32,
}
struct SyncDriver<'a> {
col: &'a mut Collection,
remote: HTTPSyncClient,
}
impl SyncDriver<'_> {
async fn from_login<'a>(
col: &'a mut Collection,
username: &str,
password: &str,
) -> Result<SyncDriver<'a>> {
let mut remote = HTTPSyncClient::new(None, "");
remote.login(username, password).await?;
Ok(SyncDriver { col, remote })
}
fn from_hkey<'a>(
col: &'a mut Collection,
hkey: String,
endpoint_suffix: &str,
) -> SyncDriver<'a> {
SyncDriver {
col,
remote: HTTPSyncClient::new(Some(hkey), endpoint_suffix),
}
}
async fn get_sync_state(&self) -> Result<SyncState> {
let remote: SyncMeta = self.remote.meta().await?;
if !remote.should_continue {
return Err(AnkiError::SyncError {
info: remote.server_message,
kind: SyncErrorKind::ServerMessage,
});
}
let local = self.col.sync_meta()?;
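        // Clock sanity check: syncing compares modification stamps from both
        // ends, so bail out when the wall clocks are more than five minutes
        // (300s) apart. E.g. local 1_590_000_000 vs server 1_590_000_250 is
        // accepted (250s of skew); a 400s gap fails with ClockIncorrect.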
if (remote.current_time.0 - local.current_time.0).abs() > 300 {
return Err(AnkiError::SyncError {
// fixme: need to rethink error handling; defer translation and pass in time difference
info: "".into(),
kind: SyncErrorKind::ClockIncorrect,
});
}
if remote.modified == local.modified {
return Ok(SyncState::NoChanges);
}
if remote.schema != local.schema {
return Ok(SyncState::FullSyncRequired);
}
Ok(SyncState::NormalSyncRequired(NormalSyncMeta {
local_is_newer: local.modified > remote.modified,
local_usn: local.usn,
remote_usn: remote.usn,
server_message: remote.server_message,
shard_number: remote.shard_number,
}))
}
/// Sync. Caller must have created a transaction, and should call
/// abort on failure.
pub(crate) async fn sync(&mut self, meta: NormalSyncMeta) -> Result<()> {
self.col.basic_check_for_sync()?;
self.start_and_process_deletions(&meta).await?;
self.process_unchunked_changes(meta.remote_usn, meta.local_is_newer)
.await?;
self.process_chunks_from_server().await?;
self.send_chunks_to_server(meta.remote_usn).await?;
self.sanity_check().await?;
self.finalize(meta).await?;
Ok(())
}
/// Return the remote client for use in a full sync.
fn into_remote(self) -> HTTPSyncClient {
self.remote
}
// The following operations assume a transaction has been set up.
async fn start_and_process_deletions(&self, meta: &NormalSyncMeta) -> Result<()> {
let removed_on_remote = self
.remote
.start(
meta.local_usn,
self.col.get_local_mins_west(),
meta.local_is_newer,
)
.await?;
let mut locally_removed = self.col.storage.take_pending_graves(meta.remote_usn)?;
while let Some(chunk) = locally_removed.take_chunk() {
self.remote.apply_graves(chunk).await?;
}
self.col.apply_graves(removed_on_remote, meta.local_usn)?;
Ok(())
}
// This was assumed to be a cheap operation when originally written - it didn't anticipate
// the large deck trees and note types some users would create. They should be chunked
// in the future, like other objects. Syncing tags explicitly is also probably of limited
// usefulness.
async fn process_unchunked_changes(&self, remote_usn: Usn, local_is_newer: bool) -> Result<()> {
let local_changes = self
.col
.local_unchunked_changes(remote_usn, local_is_newer)?;
let remote_changes = self.remote.apply_changes(local_changes).await?;
self.col.apply_changes(remote_changes, remote_usn)
}
async fn process_chunks_from_server(&mut self) -> Result<()> {
loop {
let chunk: Chunk = self.remote.chunk().await?;
let done = chunk.done;
self.col.apply_chunk(chunk)?;
if done {
return Ok(());
}
}
}
async fn send_chunks_to_server(&self, server_usn: Usn) -> Result<()> {
loop {
let chunk: Chunk = self.col.get_chunk(server_usn)?;
let done = chunk.done;
self.remote.apply_chunk(chunk).await?;
if done {
return Ok(());
}
}
}
/// Caller should force full sync after rolling back.
async fn sanity_check(&self) -> Result<()> {
let local_counts = self.col.sanity_check_info()?;
let out: SanityCheckOut = self.remote.sanity_check(local_counts).await?;
if out.status != SanityCheckStatus::Ok {
Err(AnkiError::SyncError {
info: String::new(),
kind: SyncErrorKind::DatabaseCheckRequired,
})
} else {
Ok(())
}
}
async fn finalize(&self, meta: NormalSyncMeta) -> Result<()> {
let new_server_mtime = self.remote.finish().await?;
self.col.finalize_sync(meta, new_server_mtime)
}
}
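
A sketch of how a caller might drive SyncDriver end to end - hypothetical glue, not part of this commit: it assumes the hkey was obtained from an earlier login, that the caller opened the transaction sync() requires, and it leaves the full-sync path as a stub:

async fn sync_with_server(col: &mut Collection, hkey: String) -> Result<()> {
    // caller is expected to have begun a transaction already (see sync())
    let mut driver = SyncDriver::from_hkey(col, hkey, "");
    match driver.get_sync_state().await? {
        SyncState::NoChanges => Ok(()),
        SyncState::FullSyncRequired => {
            // hand the connection over to the full upload/download code
            let _remote = driver.into_remote();
            todo!()
        }
        SyncState::NormalSyncRequired(meta) => driver.sync(meta).await,
    }
}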
const CHUNK_SIZE: usize = 250;
impl Graves {
fn take_chunk(&mut self) -> Option<Graves> {
let mut limit = CHUNK_SIZE;
let mut out = Graves::default();
while limit > 0 && !self.cards.is_empty() {
out.cards.push(self.cards.pop().unwrap());
limit -= 1;
}
while limit > 0 && !self.notes.is_empty() {
out.notes.push(self.notes.pop().unwrap());
limit -= 1;
}
while limit > 0 && !self.decks.is_empty() {
out.decks.push(self.decks.pop().unwrap());
limit -= 1;
}
if limit == CHUNK_SIZE {
None
} else {
Some(out)
}
}
}
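
The draining logic above is easy to get subtly wrong, so here is a standalone mirror (two object kinds instead of three) demonstrating that repeated calls yield batches of at most CHUNK_SIZE and finally None:

const CHUNK_SIZE: usize = 250;

#[derive(Default)]
struct GravesDemo {
    cards: Vec<i64>,
    notes: Vec<i64>,
}

impl GravesDemo {
    // same shape as Graves::take_chunk above, minus decks
    fn take_chunk(&mut self) -> Option<GravesDemo> {
        let mut limit = CHUNK_SIZE;
        let mut out = GravesDemo::default();
        while limit > 0 && !self.cards.is_empty() {
            out.cards.push(self.cards.pop().unwrap());
            limit -= 1;
        }
        while limit > 0 && !self.notes.is_empty() {
            out.notes.push(self.notes.pop().unwrap());
            limit -= 1;
        }
        if limit == CHUNK_SIZE {
            None
        } else {
            Some(out)
        }
    }
}

fn main() {
    let mut g = GravesDemo {
        cards: (0..300).collect(),
        notes: vec![1, 2, 3],
    };
    assert_eq!(g.take_chunk().unwrap().cards.len(), 250); // first chunk is all cards
    let second = g.take_chunk().unwrap();
    assert_eq!((second.cards.len(), second.notes.len()), (50, 3));
    assert!(g.take_chunk().is_none()); // fully drained
}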
impl Collection {
fn sync_meta(&self) -> Result<SyncMeta> {
Ok(SyncMeta {
modified: self.storage.get_modified_time()?,
schema: self.storage.get_schema_mtime()?,
usn: self.storage.usn(true)?,
current_time: TimestampSecs::now(),
server_message: "".into(),
should_continue: true,
shard_number: 0,
})
}
fn basic_check_for_sync(&self) -> Result<()> {
todo!();
}
fn apply_graves(&self, graves: Graves, local_usn: Usn) -> Result<()> {
for nid in graves.notes {
self.storage.remove_note(nid)?;
self.storage.add_note_grave(nid, local_usn)?;
}
for cid in graves.cards {
self.storage.remove_card(cid)?;
self.storage.add_card_grave(cid, local_usn)?;
}
for did in graves.decks {
self.storage.remove_deck(did)?;
self.storage.add_deck_grave(did, local_usn)?;
}
Ok(())
}
// Local->remote unchunked changes
//----------------------------------------------------------------
fn local_unchunked_changes(
&self,
remote_usn: Usn,
local_is_newer: bool,
) -> Result<UnchunkedChanges> {
let mut changes = UnchunkedChanges {
notetypes: self.changed_notetypes(remote_usn)?,
decks_and_config: DecksAndConfig {
decks: self.changed_decks(remote_usn)?,
config: self.changed_deck_config(remote_usn)?,
},
tags: self.changed_tags(remote_usn)?,
..Default::default()
};
if local_is_newer {
changes.config = Some(self.changed_config(remote_usn)?);
changes.creation_stamp = Some(self.storage.creation_stamp()?);
}
Ok(changes)
}
fn changed_notetypes(&self, new_usn: Usn) -> Result<Vec<NoteTypeSchema11>> {
let ids = self.storage.take_notetype_ids_pending_sync(new_usn)?;
ids.into_iter()
.map(|id| self.storage.get_notetype(id).map(|opt| opt.unwrap().into()))
.collect()
}
fn changed_decks(&self, new_usn: Usn) -> Result<Vec<DeckSchema11>> {
let ids = self.storage.take_deck_ids_pending_sync(new_usn)?;
ids.into_iter()
.map(|id| self.storage.get_deck(id).map(|opt| opt.unwrap().into()))
.collect()
}
fn changed_deck_config(&self, new_usn: Usn) -> Result<Vec<DeckConfSchema11>> {
let ids = self.storage.take_deck_config_ids_pending_sync(new_usn)?;
ids.into_iter()
.map(|id| {
self.storage
.get_deck_config(id)
.map(|opt| opt.unwrap().into())
})
.collect()
}
fn changed_tags(&self, new_usn: Usn) -> Result<Vec<String>> {
self.storage.take_changed_tags(new_usn)
}
/// Currently this is all config, as legacy clients overwrite the local items
/// with the provided value.
fn changed_config(&self, new_usn: Usn) -> Result<HashMap<String, Value>> {
let conf = self.storage.get_all_config()?;
self.storage.clear_config_usns()?;
Ok(conf)
}
// Remote->local unchunked changes
//----------------------------------------------------------------
fn apply_changes(&self, remote: UnchunkedChanges, remote_usn: Usn) -> Result<()> {
self.merge_notetypes(remote.notetypes)?;
self.merge_decks(remote.decks_and_config.decks)?;
self.merge_deck_config(remote.decks_and_config.config)?;
self.merge_tags(remote.tags, remote_usn)?;
if let Some(crt) = remote.creation_stamp {
self.storage.set_creation_stamp(crt)?;
}
if let Some(config) = remote.config {
self.storage
.set_all_config(config, remote_usn, TimestampSecs::now())?;
}
Ok(())
}
fn merge_notetypes(&self, notetypes: Vec<NoteTypeSchema11>) -> Result<()> {
for nt in notetypes {
let nt: NoteType = nt.into();
let proceed = if let Some(existing_nt) = self.storage.get_notetype(nt.id)? {
if existing_nt.mtime_secs < nt.mtime_secs {
if (existing_nt.fields.len() != nt.fields.len())
|| (existing_nt.templates.len() != nt.templates.len())
{
return Err(AnkiError::SyncError {
info: "notetype schema changed".into(),
kind: SyncErrorKind::ResyncRequired,
});
}
true
} else {
false
}
} else {
true
};
if proceed {
self.storage.add_or_update_notetype(&nt)?;
}
}
Ok(())
}
fn merge_decks(&self, decks: Vec<DeckSchema11>) -> Result<()> {
for deck in decks {
let proceed = if let Some(existing_deck) = self.storage.get_deck(deck.id())? {
existing_deck.mtime_secs < deck.common().mtime
} else {
true
};
if proceed {
let deck = deck.into();
self.storage.add_or_update_deck(&deck)?;
}
}
Ok(())
}
fn merge_deck_config(&self, dconf: Vec<DeckConfSchema11>) -> Result<()> {
for conf in dconf {
let proceed = if let Some(existing_conf) = self.storage.get_deck_config(conf.id)? {
existing_conf.mtime_secs < conf.mtime
} else {
true
};
if proceed {
let conf = conf.into();
self.storage.add_or_update_deck_config(&conf)?;
}
}
Ok(())
}
fn merge_tags(&self, tags: Vec<String>, new_usn: Usn) -> Result<()> {
for tag in tags {
self.register_tag(&tag, new_usn)?;
}
Ok(())
}
// Remote->local chunks
//----------------------------------------------------------------
fn apply_chunk(&mut self, chunk: Chunk) -> Result<()> {
self.merge_revlog(chunk.revlog)?;
self.merge_cards(chunk.cards)?;
self.merge_notes(chunk.notes)
}
fn merge_revlog(&self, entries: Vec<ReviewLogEntry>) -> Result<()> {
for entry in entries {
self.storage.add_revlog_entry(&entry)?;
}
Ok(())
}
fn merge_cards(&self, entries: Vec<CardEntry>) -> Result<()> {
for entry in entries {
self.add_or_update_card_if_newer(entry)?;
}
Ok(())
}
fn add_or_update_card_if_newer(&self, entry: CardEntry) -> Result<()> {
let proceed = if let Some(existing_card) = self.storage.get_card(entry.id)? {
existing_card.mtime < entry.mtime
} else {
true
};
if proceed {
let card = entry.into();
self.storage.add_or_update_card(&card)?;
}
Ok(())
}
fn merge_notes(&mut self, entries: Vec<NoteEntry>) -> Result<()> {
for entry in entries {
self.add_or_update_note_if_newer(entry)?;
}
Ok(())
}
fn add_or_update_note_if_newer(&mut self, entry: NoteEntry) -> Result<()> {
let proceed = if let Some(existing_note) = self.storage.get_note(entry.id)? {
existing_note.mtime < entry.mtime
} else {
true
};
if proceed {
let mut note: Note = entry.into();
let nt = self
.get_notetype(note.ntid)?
.ok_or(AnkiError::invalid_input("note missing notetype"))?;
note.prepare_for_update(&nt, false)?;
self.storage.add_or_update_note(&note)?;
}
Ok(())
}
// Local->remote chunks
//----------------------------------------------------------------
fn get_chunk(&self, server_usn: Usn) -> Result<Chunk> {
let mut chunk = Chunk::default();
chunk.revlog = self
.storage
.take_revlog_pending_sync(server_usn, CHUNK_SIZE)?;
chunk.cards = self
.storage
.take_cards_pending_sync(server_usn, CHUNK_SIZE)?;
if !chunk.revlog.is_empty() || !chunk.cards.is_empty() {
return Ok(chunk);
}
chunk.notes = self
.storage
.take_notes_pending_sync(server_usn, CHUNK_SIZE)?;
if chunk.notes.is_empty() {
chunk.done = true;
}
Ok(chunk)
}
// Final steps
//----------------------------------------------------------------
fn sanity_check_info(&self) -> Result<SanityCheckCounts> {
self.basic_check_for_sync()?;
todo!();
}
fn finalize_sync(&self, meta: NormalSyncMeta, new_server_mtime: TimestampMillis) -> Result<()> {
self.storage.set_last_sync(new_server_mtime)?;
let mut usn = meta.remote_usn;
usn.0 += 1;
self.storage.set_usn(usn)?;
self.storage.set_modified_time(new_server_mtime)
}
}
impl From<CardEntry> for Card {
fn from(e: CardEntry) -> Self {
Card {
id: e.id,
nid: e.nid,
did: e.did,
ord: e.ord,
mtime: e.mtime,
usn: e.usn,
ctype: e.ctype,
queue: e.queue,
due: e.due,
ivl: e.ivl,
factor: e.factor,
reps: e.reps,
lapses: e.lapses,
left: e.left,
odue: e.odue,
odid: e.odid,
flags: e.flags,
data: e.data,
}
}
}
impl From<Card> for CardEntry {
fn from(e: Card) -> Self {
CardEntry {
id: e.id,
nid: e.nid,
did: e.did,
ord: e.ord,
mtime: e.mtime,
usn: e.usn,
ctype: e.ctype,
queue: e.queue,
due: e.due,
ivl: e.ivl,
factor: e.factor,
reps: e.reps,
lapses: e.lapses,
left: e.left,
odue: e.odue,
odid: e.odid,
flags: e.flags,
data: e.data,
}
}
}
impl From<NoteEntry> for Note {
fn from(e: NoteEntry) -> Self {
Note {
id: e.id,
guid: e.guid,
ntid: e.ntid,
mtime: e.mtime,
usn: e.usn,
tags: split_tags(&e.tags).map(ToString::to_string).collect(),
fields: e.fields.split('\x1f').map(ToString::to_string).collect(),
sort_field: None,
checksum: None,
}
}
}
impl From<Note> for NoteEntry {
fn from(e: Note) -> Self {
NoteEntry {
id: e.id,
guid: e.guid,
ntid: e.ntid,
mtime: e.mtime,
usn: e.usn,
tags: join_tags(&e.tags),
fields: e.fields.into_iter().join("\x1f"),
sfld: String::new(),
csum: String::new(),
flags: 0,
data: String::new(),
}
}
}


@@ -97,7 +97,6 @@ fn want_release_gil(method: u32) -> bool {
BackendMethod::FormatTimespan => false,
BackendMethod::RegisterTags => true,
BackendMethod::AllTags => true,
BackendMethod::GetChangedTags => true,
BackendMethod::GetConfigJson => true,
BackendMethod::SetConfigJson => true,
BackendMethod::RemoveConfig => true,
@@ -106,6 +105,8 @@ fn want_release_gil(method: u32) -> bool {
BackendMethod::GetPreferences => true,
BackendMethod::SetPreferences => true,
BackendMethod::NoteIsDuplicateOrEmpty => true,
BackendMethod::SyncLogin => true,
BackendMethod::SyncCollection => true,
}
} else {
false