add back in missing check for upload size

Compression now happens up-front, so we can tell in advance whether the
upload limit has been exceeded; a rough sketch of the new flow follows the
commit details below.
Damien Elmes 2021-06-29 10:31:41 +10:00
parent e3c9808b79
commit 576b141e2b
11 changed files with 55 additions and 139 deletions
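
In outline, the upload path now compresses first and checks the size at two
points. Below is a minimal synchronous sketch of that approach (simplified
names, String errors, no async IO or progress reporting; it is not the code
from the diff itself):

    use std::io::Write;

    use flate2::{write::GzEncoder, Compression};

    // Default limits from the diff below; both are overridable via
    // environment variables for users running a custom sync server.
    const MAX_UNCOMPRESSED_MB: usize = 250;
    const MAX_COMPRESSED_MB: usize = 100;

    fn check_limit(size_bytes: usize, limit_mb: usize) -> Result<(), String> {
        let size_mb = size_bytes / 1024 / 1024;
        if size_mb >= limit_mb {
            Err(format!("{}MB > {}MB", size_mb, limit_mb))
        } else {
            Ok(())
        }
    }

    fn prepare_upload(collection: &[u8]) -> Result<Vec<u8>, String> {
        // Cheap early exit: reject before spending time compressing.
        check_limit(collection.len(), MAX_UNCOMPRESSED_MB)?;
        // Compress the whole file into memory rather than streaming it,
        // so the compressed size is known before the request begins.
        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
        encoder.write_all(collection).map_err(|e| e.to_string())?;
        let compressed = encoder.finish().map_err(|e| e.to_string())?;
        check_limit(compressed.len(), MAX_COMPRESSED_MB)?;
        Ok(compressed)
    }

The trade-off against the previous streaming encoder is that the compressed
collection is now buffered in memory, but the uncompressed cap bounds that
cost, and buffering is what makes the compressed size known before any bytes
are sent.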

Cargo.lock

@@ -56,7 +56,6 @@ dependencies = [
  "ammonia",
  "anki_i18n",
  "askama",
- "async-compression",
  "async-trait",
  "blake3",
  "bytes",
@@ -208,19 +207,6 @@ dependencies = [
  "toml",
 ]
 
-[[package]]
-name = "async-compression"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6"
-dependencies = [
- "flate2",
- "futures-core",
- "memchr",
- "pin-project-lite",
- "tokio",
-]
-
 [[package]]
 name = "async-trait"
 version = "0.1.50"


@@ -151,16 +151,6 @@ def raze_fetch_remote_crates():
         build_file = Label("//cargo/remote:BUILD.askama_shared-0.11.1.bazel"),
     )
 
-    maybe(
-        http_archive,
-        name = "raze__async_compression__0_3_8",
-        url = "https://crates.io/api/v1/crates/async-compression/0.3.8/download",
-        type = "tar.gz",
-        sha256 = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6",
-        strip_prefix = "async-compression-0.3.8",
-        build_file = Label("//cargo/remote:BUILD.async-compression-0.3.8.bazel"),
-    )
-
     maybe(
         http_archive,
         name = "raze__async_trait__0_1_50",


@@ -152,15 +152,6 @@
         "license_file": null,
         "description": "Shared code for Askama"
     },
-    {
-        "name": "async-compression",
-        "version": "0.3.8",
-        "authors": "Wim Looman <wim@nemo157.com>|Allen Bui <fairingrey@gmail.com>",
-        "repository": "https://github.com/Nemo157/async-compression",
-        "license": "Apache-2.0 OR MIT",
-        "license_file": null,
-        "description": "Adaptors between compression crates and Rust's modern asynchronous IO types."
-    },
     {
         "name": "async-trait",
         "version": "0.1.50",


@@ -1,84 +0,0 @@
-"""
-@generated
-cargo-raze crate build file.
-
-DO NOT EDIT! Replaced on runs of cargo-raze
-"""
-
-# buildifier: disable=load
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# buildifier: disable=load
-load(
-    "@rules_rust//rust:rust.bzl",
-    "rust_binary",
-    "rust_library",
-    "rust_test",
-)
-
-package(default_visibility = [
-    # Public for visibility by "@raze__crate__version//" targets.
-    #
-    # Prefer access through "//cargo", which limits external
-    # visibility to explicit Cargo.toml dependencies.
-    "//visibility:public",
-])
-
-licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
-])
-
-# Generated Targets
-
-# Unsupported target "zlib_tokio_02_write" with type "example" omitted
-
-# Unsupported target "zstd_gzip" with type "example" omitted
-
-rust_library(
-    name = "async_compression",
-    srcs = glob(["**/*.rs"]),
-    crate_features = [
-        "default",
-        "flate2",
-        "gzip",
-        "tokio",
-    ],
-    crate_root = "src/lib.rs",
-    crate_type = "lib",
-    data = [],
-    edition = "2018",
-    rustc_flags = [
-        "--cap-lints=allow",
-    ],
-    tags = [
-        "cargo-raze",
-        "manual",
-    ],
-    version = "0.3.8",
-    # buildifier: leave-alone
-    deps = [
-        "@raze__flate2__1_0_20//:flate2",
-        "@raze__futures_core__0_3_15//:futures_core",
-        "@raze__memchr__2_4_0//:memchr",
-        "@raze__pin_project_lite__0_2_6//:pin_project_lite",
-        "@raze__tokio__1_7_1//:tokio",
-    ],
-)
-
-# Unsupported target "brotli" with type "test" omitted
-
-# Unsupported target "bzip2" with type "test" omitted
-
-# Unsupported target "deflate" with type "test" omitted
-
-# Unsupported target "gzip" with type "test" omitted
-
-# Unsupported target "lzma" with type "test" omitted
-
-# Unsupported target "proptest" with type "test" omitted
-
-# Unsupported target "xz" with type "test" omitted
-
-# Unsupported target "zlib" with type "test" omitted
-
-# Unsupported target "zstd" with type "test" omitted


@@ -41,6 +41,10 @@ sync-account-required =
     A free account is required to keep your collection synchronized. Please <a href="{ $link }">sign up</a> for an account, then enter your details below.
 sync-sanity-check-failed = Please use the Check Database function, then sync again. If problems persist, please force a full sync in the preferences screen.
 sync-clock-off = Unable to sync - your clock is not set to the correct time.
+sync-upload-too-large =
+    Your collection file is too large to send to AnkiWeb. You can reduce its
+    size by removing any unwanted decks (optionally exporting them first), and
+    then using Check Database to shrink the file size down. ({ $details })
 
 ## Buttons
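
The { $details } placeholder is filled from the error's info string, which the
new check_upload_limit() helper further down populates with the measured and
allowed sizes (e.g. "260MB > 250MB").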


@@ -75,7 +75,6 @@ rust_library(
         ":build_script",
         "//rslib/cargo:ammonia",
         "//rslib/cargo:askama",
-        "//rslib/cargo:async_compression",
         "//rslib/cargo:blake3",
         "//rslib/cargo:bytes",
         "//rslib/cargo:chrono",


@@ -37,7 +37,6 @@ slog-term = "=2.6.0"
 anki_i18n = { path="i18n" }
 askama = "0.10.5"
-async-compression = { version = "0.3.8", features = ["tokio", "gzip"] }
 blake3 = "0.3.8"
 bytes = "1.0.1"
 chrono = "0.4.19"


@@ -30,15 +30,6 @@ alias(
     ],
 )
 
-alias(
-    name = "async_compression",
-    actual = "@raze__async_compression__0_3_8//:async_compression",
-    tags = [
-        "cargo-raze",
-        "manual",
-    ],
-)
-
 alias(
     name = "async_trait",
     actual = "@raze__async_trait__0_1_50//:async_trait",


@@ -38,6 +38,7 @@ pub enum SyncErrorKind {
     ResyncRequired,
     DatabaseCheckRequired,
     SyncNotStarted,
+    UploadTooLarge,
 }
 
 impl AnkiError {
@@ -152,6 +153,7 @@ impl SyncError {
             SyncErrorKind::ClockIncorrect => tr.sync_clock_off(),
             SyncErrorKind::DatabaseCheckRequired => tr.sync_sanity_check_failed(),
             SyncErrorKind::SyncNotStarted => "sync not started".into(),
+            SyncErrorKind::UploadTooLarge => tr.sync_upload_too_large(&self.info),
         }
         .into()
     }


@@ -1,15 +1,23 @@
 // Copyright: Ankitects Pty Ltd and contributors
 // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
-use std::{io::prelude::*, mem::MaybeUninit, path::Path, time::Duration};
+use std::{
+    env,
+    io::{prelude::*, Cursor},
+    mem::MaybeUninit,
+    path::Path,
+    time::Duration,
+};
 
 use async_trait::async_trait;
 use bytes::Bytes;
 use flate2::{write::GzEncoder, Compression};
 use futures::{Stream, StreamExt};
+use lazy_static::lazy_static;
 use reqwest::{multipart, Body, Client, Response};
 use serde::de::DeserializeOwned;
 use tempfile::NamedTempFile;
+use tokio_util::io::ReaderStream;
 
 use super::{
     http::{
@@ -22,7 +30,16 @@
 };
 use crate::{error::SyncErrorKind, notes::guid, prelude::*, version::sync_client_version};
 
-// fixme: 100mb limit
+lazy_static! {
+    // These limits are enforced server-side, but are made adjustable for users
+    // who are using a custom sync server.
+    static ref MAXIMUM_UPLOAD_MEGS_UNCOMPRESSED: usize = env::var("MAX_UPLOAD_MEGS_UNCOMP")
+        .map(|v| v.parse().expect("invalid upload limit"))
+        .unwrap_or(250);
+    static ref MAXIMUM_UPLOAD_MEGS_COMPRESSED: usize = env::var("MAX_UPLOAD_MEGS_COMP")
+        .map(|v| v.parse().expect("invalid upload limit"))
+        .unwrap_or(100);
+}
 
 pub type FullSyncProgressFn = Box<dyn FnMut(FullSyncProgress, bool) + Send + Sync + 'static>;
@@ -119,23 +136,23 @@ impl SyncServer for HttpSyncClient {
     async fn full_upload(mut self: Box<Self>, col_path: &Path, _can_consume: bool) -> Result<()> {
         let file = tokio::fs::File::open(col_path).await?;
         let total_bytes = file.metadata().await?.len() as usize;
+        check_upload_limit(total_bytes, *MAXIMUM_UPLOAD_MEGS_UNCOMPRESSED)?;
+        let compressed_data: Vec<u8> = gzipped_data_from_tokio_file(file).await?;
+        let compressed_size = compressed_data.len();
+        check_upload_limit(compressed_size, *MAXIMUM_UPLOAD_MEGS_COMPRESSED)?;
         let progress_fn = self
             .full_sync_progress_fn
             .take()
             .expect("progress func was not set");
-        let wrap1 = ProgressWrapper {
-            reader: file,
+        let with_progress = ProgressWrapper {
+            reader: Cursor::new(compressed_data),
             progress_fn,
             progress: FullSyncProgress {
                 transferred_bytes: 0,
-                total_bytes,
+                total_bytes: compressed_size,
             },
         };
-        let wrap2 =
-            tokio_util::io::ReaderStream::new(async_compression::tokio::bufread::GzipEncoder::new(
-                tokio_util::io::StreamReader::new(wrap1),
-            ));
-        let body = Body::wrap_stream(wrap2);
+        let body = Body::wrap_stream(with_progress);
 
         self.upload_inner(body).await?;
         Ok(())
@@ -174,6 +191,28 @@
     }
 }
 
+async fn gzipped_data_from_tokio_file(file: tokio::fs::File) -> Result<Vec<u8>> {
+    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
+    let mut stream = ReaderStream::new(file);
+    while let Some(chunk) = stream.next().await {
+        let chunk = chunk?;
+        encoder.write_all(&chunk)?;
+    }
+    encoder.finish().map_err(Into::into)
+}
+
+fn check_upload_limit(size: usize, limit_mb: usize) -> Result<()> {
+    let size_mb = size / 1024 / 1024;
+    if size_mb >= limit_mb {
+        Err(AnkiError::sync_error(
+            format!("{}MB > {}MB", size_mb, limit_mb),
+            SyncErrorKind::UploadTooLarge,
+        ))
+    } else {
+        Ok(())
+    }
+}
+
 impl HttpSyncClient {
     pub fn new(hkey: Option<String>, host_number: u32) -> HttpSyncClient {
         let timeouts = Timeouts::new();
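
Two usage notes on the new helpers. The limits are read once at first use, so
someone running a custom sync server can lift them by exporting
MAX_UPLOAD_MEGS_UNCOMP / MAX_UPLOAD_MEGS_COMP before launching the client; a
non-numeric value will panic through the expect() call above. And since
check_upload_limit() compares in whole megabytes, the boundary behaviour is
worth pinning down; a hypothetical test, not part of this commit:

    #[test]
    fn upload_limit_boundaries() {
        const MB: usize = 1024 * 1024;
        // One byte under 100MB rounds down to 99MB and passes a 100MB limit...
        assert!(check_upload_limit(100 * MB - 1, 100).is_ok());
        // ...while exactly 100MB trips the >= comparison and is rejected.
        assert!(check_upload_limit(100 * MB, 100).is_err());
    }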


@@ -645,7 +645,6 @@ impl Collection {
         let mut server = HttpSyncClient::new(Some(auth.hkey), auth.host_number);
         server.set_full_sync_progress_fn(Some(progress_fn));
         self.full_upload_inner(Box::new(server)).await
-        // remote.upload(&col_path, progress_fn).await?;
     }
 
     pub(crate) async fn full_upload_inner(mut self, server: Box<dyn SyncServer>) -> Result<()> {