Mirror of https://github.com/ankitects/anki.git
Don't enforce download size on client
https://forums.ankiweb.net/t/local-sync-server-collection-exceeds-size-limit/27183/6

Commit 92cf5cd898, parent 855dc9d75b
2 changed files with 8 additions and 6 deletions
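Per the linked thread, the failing case was a download from a local sync server whose collection exceeded the size limit, so the check now lives only in the server-side request decode. The hunks below split the shared helper: the buffered decode used when parsing sync requests becomes decode_zstd_body_for_server (still enforcing the maximum payload size), while the client's IoMonitor switches to decode_zstd_body_stream_for_client, which drops the take() cap on uncompressed bytes. As a rough illustration of the mechanism being removed from the client path, here is a minimal, self-contained sketch; the function name decode_capped_stream and the explicit max_uncompressed parameter are illustrative, not the commit's code (the real constant is MAXIMUM_SYNC_PAYLOAD_BYTES_UNCOMPRESSED):

use std::io::ErrorKind;

use async_compression::tokio::bufread::ZstdDecoder;
use bytes::Bytes;
use futures::{Stream, TryStreamExt};
use tokio::io::AsyncReadExt;
use tokio_util::io::{ReaderStream, StreamReader};

// Sketch of the cap the client path no longer applies: decompress a zstd body
// stream, but stop yielding data once `max_uncompressed` bytes have been read
// from the decoder. The buffered server-side helper still enforces a limit.
fn decode_capped_stream<S, E>(
    data: S,
    max_uncompressed: u64,
) -> impl Stream<Item = std::io::Result<Bytes>>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
    E: std::fmt::Display,
{
    // Turn the byte stream into an AsyncRead, converting stream errors into
    // io::Error the same way the surrounding code does.
    let reader = StreamReader::new(
        data.map_err(|e| std::io::Error::new(ErrorKind::ConnectionAborted, format!("{e}"))),
    );
    // Decompress, then cap the number of *uncompressed* bytes a caller can pull.
    let decoder = ZstdDecoder::new(reader);
    ReaderStream::new(decoder.take(max_uncompressed))
}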
@@ -29,7 +29,7 @@ use crate::sync::error::HttpError;
 use crate::sync::error::HttpResult;
 use crate::sync::error::HttpSnafu;
 use crate::sync::error::OrHttpErr;
-use crate::sync::request::header_and_stream::decode_zstd_body_stream;
+use crate::sync::request::header_and_stream::decode_zstd_body_stream_for_client;
 use crate::sync::request::header_and_stream::encode_zstd_body_stream;
 use crate::sync::response::ORIGINAL_SIZE;
 
@@ -131,7 +131,7 @@ impl IoMonitor {
         let response_stream = self.wrap_stream(
             false,
             response_total,
-            decode_zstd_body_stream(resp.bytes_stream()),
+            decode_zstd_body_stream_for_client(resp.bytes_stream()),
         );
         let mut reader =
             StreamReader::new(response_stream.map_err(|e| {
@@ -38,7 +38,7 @@ impl<T> SyncRequest<T>
         T: DeserializeOwned,
     {
         sync_header.sync_version.ensure_supported()?;
-        let data = decode_zstd_body(body_stream).await?;
+        let data = decode_zstd_body_for_server(body_stream).await?;
         Ok(Self {
             sync_key: sync_header.sync_key,
             session_key: sync_header.session_key,
@@ -52,7 +52,8 @@ impl<T> SyncRequest<T>
     }
 }
 
-pub async fn decode_zstd_body<S, E>(data: S) -> HttpResult<Vec<u8>>
+/// Enforces max payload size
+pub async fn decode_zstd_body_for_server<S, E>(data: S) -> HttpResult<Vec<u8>>
 where
     S: Stream<Item = Result<Bytes, E>> + Unpin,
     E: Display,
@@ -70,7 +71,8 @@ where
     Ok(buf)
 }
 
-pub fn decode_zstd_body_stream<S, E>(data: S) -> impl Stream<Item = HttpResult<Bytes>>
+/// Does not enforce payload size
+pub fn decode_zstd_body_stream_for_client<S, E>(data: S) -> impl Stream<Item = HttpResult<Bytes>>
 where
     S: Stream<Item = Result<Bytes, E>> + Unpin,
     E: Display,
@@ -79,7 +81,7 @@ where
         data.map_err(|e| std::io::Error::new(ErrorKind::ConnectionAborted, format!("{e}"))),
     );
     let reader = async_compression::tokio::bufread::ZstdDecoder::new(reader);
-    ReaderStream::new(reader.take(*MAXIMUM_SYNC_PAYLOAD_BYTES_UNCOMPRESSED)).map_err(|err| {
+    ReaderStream::new(reader).map_err(|err| {
         HttpSnafu {
             code: StatusCode::BAD_REQUEST,
             context: "decode zstd body",
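The body of decode_zstd_body_for_server sits outside the hunks above, so only its new doc comment ("Enforces max payload size") is visible in this diff. A hedged sketch of what a buffered decode with a size check can look like; the limit parameter, the error text, and the limit + 1 read used to detect overflow are assumptions for illustration, not the commit's implementation (the real helper returns HttpResult and reports failures through HttpSnafu, as the surrounding code shows):

use std::io::ErrorKind;

use async_compression::tokio::bufread::ZstdDecoder;
use bytes::Bytes;
use futures::{Stream, TryStreamExt};
use tokio::io::AsyncReadExt;
use tokio_util::io::StreamReader;

// Illustrative server-side decode: buffer the whole uncompressed body, but
// reject it if it exceeds `limit`. Reading at most `limit + 1` bytes lets the
// check distinguish "exactly at the limit" from "over it" without unbounded
// memory use.
async fn decode_zstd_body_with_limit<S, E>(data: S, limit: u64) -> std::io::Result<Vec<u8>>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
    E: std::fmt::Display,
{
    let reader = StreamReader::new(
        data.map_err(|e| std::io::Error::new(ErrorKind::ConnectionAborted, format!("{e}"))),
    );
    let mut capped = ZstdDecoder::new(reader).take(limit + 1);
    let mut buf = Vec::new();
    capped.read_to_end(&mut buf).await?;
    if buf.len() as u64 > limit {
        return Err(std::io::Error::new(
            ErrorKind::InvalidData,
            "uncompressed payload exceeds the configured sync limit",
        ));
    }
    Ok(buf)
}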