mirror of
https://github.com/ankitects/anki.git
synced 2025-09-18 22:12:21 -04:00

This PR replaces the existing Python-driven sync server with a new one in Rust. The new server supports both collection and media syncing, and is compatible with both the new protocol mentioned below, and older clients. A setting has been added to the preferences screen to point Anki to a local server, and a similar setting is likely to come to AnkiMobile soon. Documentation is available here: <https://docs.ankiweb.net/sync-server.html> In addition to the new server and refactoring, this PR also makes changes to the sync protocol. The existing sync protocol places payloads and metadata inside a multipart POST body, which causes a few headaches: - Legacy clients build the request in a non-deterministic order, meaning the entire request needs to be scanned to extract the metadata. - Reqwest's multipart API directly writes the multipart body, without exposing the resulting stream to us, making it harder to track the progress of the transfer. We've been relying on a patched version of reqwest for timeouts, which is a pain to keep up to date. To address these issues, the metadata is now sent in an HTTP header, with the data payload sent directly in the body. Instead of the slower gzip, we now use zstd. The old timeout handling code has been replaced with a new implementation that wraps the request and response body streams to track progress, allowing us to drop the git dependencies for reqwest, hyper-timeout and tokio-io-timeout. The other main change to the protocol is that one-way syncs no longer need to downgrade the collection to schema 11 prior to sending.
103 lines
3.2 KiB
Rust
103 lines
3.2 KiB
Rust
// Copyright: Ankitects Pty Ltd and contributors
|
|
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
|
|
|
#![cfg(test)]
|
|
|
|
use std::path::Path;
|
|
|
|
use tempfile::tempdir;
|
|
|
|
use crate::{
|
|
collection::CollectionBuilder,
|
|
import_export::package::import_colpkg,
|
|
io::{create_dir, create_dir_all, read_file},
|
|
media::MediaManager,
|
|
prelude::*,
|
|
};
|
|
|
|
fn collection_with_media(dir: &Path, name: &str) -> Result<Collection> {
|
|
let name = format!("{name}_src");
|
|
let media_folder = dir.join(format!("{name}.media"));
|
|
create_dir(&media_folder)?;
|
|
// add collection with sentinel note
|
|
let mut col = CollectionBuilder::new(dir.join(format!("{name}.anki2")))
|
|
.set_media_paths(media_folder, dir.join(format!("{name}.mdb")))
|
|
.build()?;
|
|
let nt = col.get_notetype_by_name("Basic")?.unwrap();
|
|
let mut note = nt.new_note();
|
|
col.add_note(&mut note, DeckId(1))?;
|
|
// add sample media
|
|
let mgr = col.media()?;
|
|
mgr.add_file("1", b"1")?;
|
|
mgr.add_file("2", b"2")?;
|
|
mgr.add_file("3", b"3")?;
|
|
Ok(col)
|
|
}
|
|
|
|
/// Export a collection with media to a .colpkg (both legacy and v3 formats),
/// import it into a fresh collection, and verify the note and media survived.
#[test]
fn roundtrip() -> Result<()> {
    let tempdir_guard = tempdir()?;
    let dir = tempdir_guard.path();

    for (legacy, name) in [(true, "legacy"), (false, "v3")] {
        // Export a freshly-created collection with media into a package file.
        let src_col = collection_with_media(dir, name)?;
        let colpkg_path = dir.join(format!("{name}.colpkg"));
        src_col.export_colpkg(&colpkg_path, true, legacy, |_, _| true)?;

        // Import the package into a brand-new collection and media DB.
        let target_col_path = dir
            .join(format!("{name}.anki2"))
            .to_string_lossy()
            .into_owned();
        let target_media_dir = dir.join(format!("{name}.media"));
        create_dir_all(&target_media_dir)?;
        let target_media_db = dir.join(format!("{name}.mdb"));
        MediaManager::new(&target_media_dir, &target_media_db)?;
        import_colpkg(
            &colpkg_path.to_string_lossy(),
            &target_col_path,
            &target_media_dir,
            &target_media_db,
            |_, _| true,
        )?;

        // The single sentinel note should have come across.
        let col = CollectionBuilder::new(&target_col_path).build()?;
        assert_eq!(
            col.storage.db_scalar::<i32>("select count() from notes")?,
            1
        );
        // Each media file should match its original content.
        for tag in ["1", "2", "3"] {
            assert_eq!(read_file(target_media_dir.join(tag))?, tag.as_bytes());
        }
    }

    Ok(())
}
|
|
|
|
/// Files with an invalid encoding should prevent export, except
/// on Apple platforms where the encoding is transparently changed.
#[test]
#[cfg(not(target_vendor = "apple"))]
fn normalization_check_on_export() -> Result<()> {
    use crate::io::write_file;

    let tempdir_guard = tempdir()?;
    let dir = tempdir_guard.path();

    let col = collection_with_media(dir, "normalize")?;
    let colpkg_path = dir.join("normalize.colpkg");
    // Drop a file with a non-normalized name straight into the media folder,
    // bypassing the media manager's usual normalization.
    write_file(col.media_folder.join("ぱぱ.jpg"), "nfd encoding")?;

    // Export must refuse with MediaCheckRequired rather than produce a package.
    let err = col
        .export_colpkg(&colpkg_path, true, false, |_, _| true)
        .unwrap_err();
    assert_eq!(err, AnkiError::MediaCheckRequired);
    // The partially-written package must have been cleaned up on failure.
    assert!(!colpkg_path.exists());

    Ok(())
}
|