
* Accept iterables as inputs to backend methods
* Shift add-on check to backend; use new endpoint
  The new endpoint returns info on a suitable branch if one is found, instead of returning all branches. This simplifies the frontend code, and means that you can now drop support for certain versions without also remotely disabling the add-on for people who are running one of the excluded versions, as happened in https://forums.ankiweb.net/t/prevent-add-ons-from-being-disabled-remote-stealthily-surreptitiously/33427
* Bump version to 23.09
  This changes Anki's version numbering system to year.month.patch, as previously mentioned on https://forums.ankiweb.net/t/use-a-different-versioning-system-semver-perhaps/20046/5
  This is shaping up to be a big release, with the introduction of FSRS and image occlusion, and it seems like a good time to finally update the version scheme as well. AnkiWeb has been updated to understand the new format, and add-on authors will now specify version compatibility using the full version number, as can be seen here: https://ankiweb.net/shared/info/3918629684
* Shift update check to backend, and tidy up update.py
* Use the shared client for sync connections too
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
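//! Full upload of a collection to the sync server, and the server-side
//! handling of a received upload.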
use std::fs;
use std::io::Write;

use anki_io::atomic_rename;
use anki_io::new_tempfile_in_parent_of;
use anki_io::write_file;
use axum::response::IntoResponse;
use axum::response::Response;
use flate2::write::GzEncoder;
use flate2::Compression;
use futures::StreamExt;
use reqwest::Client;
use tokio_util::io::ReaderStream;

use crate::collection::CollectionBuilder;
use crate::error::SyncErrorKind;
use crate::prelude::*;
use crate::storage::SchemaVersion;
use crate::sync::error::HttpResult;
use crate::sync::error::OrHttpErr;
use crate::sync::http_client::HttpSyncClient;
use crate::sync::login::SyncAuth;
use crate::sync::request::IntoSyncRequest;
use crate::sync::request::MAXIMUM_SYNC_PAYLOAD_BYTES_UNCOMPRESSED;
/// Old clients didn't display a useful message on HTTP 400, and were expected
/// to show the error message returned by the server.
pub const CORRUPT_MESSAGE: &str =
    "Your upload was corrupt. Please use Check Database, or restore from backup.";
impl Collection {
    /// Upload collection to AnkiWeb. Caller must re-open afterwards.
    pub async fn full_upload(self, auth: SyncAuth, client: Client) -> Result<()> {
        self.full_upload_with_server(HttpSyncClient::new(auth, client))
            .await
    }

    // pub for tests
    pub(super) async fn full_upload_with_server(mut self, server: HttpSyncClient) -> Result<()> {
        self.before_upload()?;
        let col_path = self.col_path.clone();
        let progress = self.new_progress_handler();
        // close the collection, writing the file out at the schema version
        // used for uploads
        self.close(Some(SchemaVersion::V18))?;
        let col_data = fs::read(&col_path)?;

        let total_bytes = col_data.len();
        // the size limit is only enforced when uploading to AnkiWeb
        if server.endpoint.as_str().contains("ankiweb") {
            check_upload_limit(
                total_bytes,
                *MAXIMUM_SYNC_PAYLOAD_BYTES_UNCOMPRESSED as usize,
            )?;
        }

        match server
            .upload_with_progress(col_data.try_into_sync_request()?, progress)
            .await?
            .upload_response()
        {
            UploadResponse::Ok => Ok(()),
            UploadResponse::Err(msg) => {
                Err(AnkiError::sync_error(msg, SyncErrorKind::ServerMessage))
            }
        }
    }
}
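// Illustrative caller flow (the scaffolding here is hypothetical, not from
// this file): open the collection, consume it with `full_upload`, then
// re-open it, as the doc comment above requires.
//
//     let col = CollectionBuilder::new(&path).build()?;
//     col.full_upload(auth, Client::new()).await?;
//     let col = CollectionBuilder::new(&path).build()?; // re-open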
/// Collection must already be open, and will be replaced on success.
pub fn handle_received_upload(
    col: &mut Option<Collection>,
    new_data: Vec<u8>,
) -> HttpResult<UploadResponse> {
    let max_bytes = *MAXIMUM_SYNC_PAYLOAD_BYTES_UNCOMPRESSED as usize;
    if new_data.len() >= max_bytes {
        return Ok(UploadResponse::Err("collection exceeds size limit".into()));
    }
    let path = col
        .as_ref()
        .or_internal_err("col was closed")?
        .col_path
        .clone();
    // write to temp file
    let temp_file = new_tempfile_in_parent_of(&path).or_internal_err("temp file")?;
    write_file(temp_file.path(), &new_data).or_internal_err("temp file")?;
    // check the collection is valid
    if let Err(err) = CollectionBuilder::new(temp_file.path())
        .set_check_integrity(true)
        .build()
    {
        tracing::info!(?err, "uploaded file was corrupt/failed to open");
        return Ok(UploadResponse::Err(CORRUPT_MESSAGE.into()));
    }
    // close collection and rename
    if let Some(col) = col.take() {
        col.close(None)
            .or_internal_err("closing current collection")?;
    }
    atomic_rename(temp_file, &path, true).or_internal_err("rename upload")?;
    Ok(UploadResponse::Ok)
}
impl IntoResponse for UploadResponse {
    fn into_response(self) -> Response {
        match self {
            // the legacy protocol expects this exact string
            UploadResponse::Ok => "OK".to_string(),
            UploadResponse::Err(e) => e,
        }
        .into_response()
    }
}
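/// Outcome of a full upload, in the string form the legacy protocol expects
/// (see the `IntoResponse` impl above).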
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum UploadResponse {
    Ok,
    Err(String),
}
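/// Returns an error if `size` meets or exceeds `limit`; the check is
/// inclusive of the limit itself.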
pub fn check_upload_limit(size: usize, limit: usize) -> Result<()> {
    if size >= limit {
        Err(AnkiError::sync_error(
            format!("{size} > {limit}"),
            SyncErrorKind::UploadTooLarge,
        ))
    } else {
        Ok(())
    }
}
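/// Gzip-compress the given bytes, feeding them through the encoder in chunks
/// so the async task can yield between chunks.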
pub async fn gzipped_data_from_vec(vec: Vec<u8>) -> Result<Vec<u8>> {
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    let mut stream = ReaderStream::new(&vec[..]);
    while let Some(chunk) = stream.next().await {
        let chunk = chunk?;
        encoder.write_all(&chunk)?;
    }
    encoder.finish().map_err(Into::into)
}
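// A minimal sketch of how the two helpers above could be exercised in tests.
// The test names are illustrative, and the sketch assumes tokio's `test`
// macro and flate2's `GzDecoder` are available to this crate.
#[cfg(test)]
mod tests {
    use std::io::Read;

    use flate2::read::GzDecoder;

    use super::*;

    #[test]
    fn upload_limit_is_inclusive() {
        assert!(check_upload_limit(99, 100).is_ok());
        // a size equal to the limit is rejected, matching `size >= limit`
        assert!(check_upload_limit(100, 100).is_err());
    }

    #[tokio::test]
    async fn gzipped_data_round_trips() {
        let data = b"collection bytes".to_vec();
        let gz = gzipped_data_from_vec(data.clone()).await.unwrap();
        let mut decoded = Vec::new();
        GzDecoder::new(&gz[..]).read_to_end(&mut decoded).unwrap();
        assert_eq!(decoded, data);
    }
}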