Refactor export-import code and resolve fixmes (#1723)
* Write media files in chunks
* Test media file writing
* Add iter `ReadDirFiles`
* Remove ImportMediaError, fail fatally instead
Partially reverts commit f8ed4d89ba.
* Compare hashes of media files to be restored
* Improve `MediaCopier::copy()`
* Restore media files atomically with tempfile
* Make downgrade flag an enum
* Remove SchemaVersion::Latest in favour of Option
* Remove sha1 comparison again
* Remove unnecessary repr(u8) (dae)
This commit is contained in: parent 5781e86995, commit 16fe18d033
18 changed files with 277 additions and 131 deletions
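The bullet points above compress several changes; the central one is that media files are now streamed in fixed-size chunks while being hashed, instead of being read into memory whole. The sketch below is illustrative only and is not code from this commit, though it mirrors the `MediaCopier::copy()` loop shown further down; it assumes the same `sha1` crate API the diff itself uses (`Sha1::new()`, `update()`, `digest().bytes()`).

use std::io::{self, Read, Write};

use sha1::Sha1;

/// Copy `reader` to `writer` in 64 KiB chunks, returning the number of bytes
/// copied and their SHA-1. Interrupted reads are retried, as in the diff below.
fn copy_and_hash(reader: &mut impl Read, writer: &mut impl Write) -> io::Result<(usize, [u8; 20])> {
    let mut size = 0;
    let mut hasher = Sha1::new();
    let mut buf = [0; 64 * 1024];
    loop {
        let count = match reader.read(&mut buf) {
            Ok(0) => break,
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            result => result?,
        };
        size += count;
        hasher.update(&buf[..count]);
        writer.write_all(&buf[..count])?;
    }
    Ok((size, hasher.digest().bytes()))
}

The `MediaCopier` introduced in this commit wraps the same loop and additionally routes the writes through an optional, reusable zstd encoder.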
@@ -58,8 +58,6 @@ message BackendError {
     SEARCH_ERROR = 14;
     CUSTOM_STUDY_ERROR = 15;
     IMPORT_ERROR = 16;
-    // Collection imported, but media import failed.
-    IMPORT_MEDIA_ERROR = 17;
   }

   // localized error description suitable for displaying to the user
@@ -24,7 +23,6 @@ from ..errors import (
     DBError,
     ExistsError,
     FilteredDeckError,
-    ImportMediaError,
     Interrupted,
     InvalidInput,
     LocalizedError,
@@ -220,9 +219,6 @@ def backend_exception_to_pylib(err: backend_pb2.BackendError) -> Exception:
     elif val == kind.CUSTOM_STUDY_ERROR:
         return CustomStudyError(err.localized)

-    elif val == kind.IMPORT_MEDIA_ERROR:
-        return ImportMediaError(err.localized)
-
     else:
         # sadly we can't do exhaustiveness checking on protobuf enums
         # assert_exhaustive(val)
@@ -80,10 +80,6 @@ class AbortSchemaModification(Exception):
     pass


-class ImportMediaError(LocalizedError):
-    pass
-
-
 # legacy
 DeckRenameError = FilteredDeckError
 AnkiError = AbortSchemaModification
@@ -11,7 +11,7 @@ import anki.importing as importing
 import aqt.deckchooser
 import aqt.forms
 import aqt.modelchooser
-from anki.errors import ImportMediaError, Interrupted
+from anki.errors import Interrupted
 from anki.importing.anki2 import V2ImportIntoV1
 from anki.importing.apkg import AnkiPackageImporter
 from aqt import AnkiQt, gui_hooks
@@ -457,8 +457,7 @@ def replace_with_apkg(
     mw: aqt.AnkiQt, filename: str, callback: Callable[[bool], None]
 ) -> None:
     """Tries to replace the provided collection with the provided backup,
-    then calls the callback. True if collection file was imported (even
-    if media failed).
+    then calls the callback. True if success.
     """
     dialog = mw.progress.start(immediate=True)
     timer = QTimer()
@@ -496,8 +495,7 @@ def replace_with_apkg(
         except Exception as error:
             if not isinstance(error, Interrupted):
                 showWarning(str(error))
-            collection_file_imported = isinstance(error, ImportMediaError)
-            callback(collection_file_imported)
+            callback(False)
         else:
             callback(True)
@@ -13,6 +13,7 @@ use crate::{
     collection::{backup, CollectionBuilder},
     log::{self},
     prelude::*,
+    storage::SchemaVersion,
 };

 impl CollectionService for Backend {
@@ -56,7 +57,7 @@ impl CollectionService for Backend {

         if input.downgrade_to_schema11 {
             let log = log::terminal();
-            if let Err(e) = col_inner.close(input.downgrade_to_schema11) {
+            if let Err(e) = col_inner.close(Some(SchemaVersion::V11)) {
                 error!(log, " failed: {:?}", e);
             }
         }
@@ -72,6 +73,7 @@ impl CollectionService for Backend {

         Ok(().into())
     }

     fn check_database(&self, _input: pb::Empty) -> Result<pb::CheckDatabaseResponse> {
         let mut handler = self.new_progress_handler();
         let progress_fn = move |progress, throttle| {
@@ -4,7 +4,7 @@
 use crate::{
     backend_proto as pb,
     backend_proto::backend_error::Kind,
-    error::{AnkiError, ImportError, SyncErrorKind},
+    error::{AnkiError, SyncErrorKind},
     prelude::*,
 };

@@ -34,7 +34,6 @@ impl AnkiError {
             AnkiError::MultipleNotetypesSelected => Kind::InvalidInput,
             AnkiError::DatabaseCheckRequired => Kind::InvalidInput,
             AnkiError::CustomStudyError(_) => Kind::CustomStudyError,
-            AnkiError::ImportError(ImportError::MediaImportFailed(_)) => Kind::ImportMediaError,
             AnkiError::ImportError(_) => Kind::ImportError,
             AnkiError::FileIoError(_) => Kind::IoError,
             AnkiError::MediaCheckRequired => Kind::InvalidInput,
@@ -39,7 +39,6 @@ impl ImportExportService for Backend {
             &input.backup_path,
             &input.col_path,
             &input.media_folder,
-            &self.tr,
             self.import_progress_fn(),
         )
         .map(Into::into)
@@ -16,7 +16,7 @@ use crate::{
     log::{default_logger, Logger},
     notetype::{Notetype, NotetypeId},
     scheduler::{queue::CardQueues, SchedulerInfo},
-    storage::SqliteStorage,
+    storage::{SchemaVersion, SqliteStorage},
     types::Usn,
     undo::UndoManager,
 };
@@ -141,8 +141,8 @@ impl Collection {
         builder
     }

-    pub(crate) fn close(self, downgrade: bool) -> Result<()> {
-        self.storage.close(downgrade)
+    pub(crate) fn close(self, desired_version: Option<SchemaVersion>) -> Result<()> {
+        self.storage.close(desired_version)
     }

     pub(crate) fn usn(&self) -> Result<Usn> {
@@ -4,25 +4,27 @@
 use std::{
     borrow::Cow,
     collections::HashMap,
-    fs::{read_dir, DirEntry, File},
+    fs::{DirEntry, File},
     io::{self, Read, Write},
     path::{Path, PathBuf},
 };

 use prost::Message;
+use sha1::Sha1;
 use tempfile::NamedTempFile;
 use zip::{write::FileOptions, CompressionMethod, ZipWriter};
 use zstd::{
-    stream::{raw::Encoder as RawEncoder, zio::Writer},
+    stream::{raw::Encoder as RawEncoder, zio},
     Encoder,
 };

 use super::super::{MediaEntries, MediaEntry, Meta, Version};
 use crate::{
     collection::CollectionBuilder,
-    io::atomic_rename,
-    media::files::{filename_if_normalized, sha1_of_data},
+    io::{atomic_rename, read_dir_files, tempfile_in_parent_of},
+    media::files::filename_if_normalized,
     prelude::*,
+    storage::SchemaVersion,
 };

 /// Enable multithreaded compression if over this size. For smaller files,
@@ -39,7 +41,7 @@ impl Collection {
         progress_fn: impl FnMut(usize),
     ) -> Result<()> {
         let colpkg_name = out_path.as_ref();
-        let temp_colpkg = NamedTempFile::new_in(colpkg_name.parent().ok_or(AnkiError::NotFound)?)?;
+        let temp_colpkg = tempfile_in_parent_of(colpkg_name)?;
         let src_path = self.col_path.clone();
         let src_media_folder = if include_media {
             Some(self.media_folder.clone())
@@ -47,11 +49,11 @@ impl Collection {
             None
         };
         let tr = self.tr.clone();
-        // FIXME: downgrade on v3 export is superfluous at current schema version. We don't
-        // want things to break when the schema is bumped in the future, so perhaps the
-        // exporting code should be downgrading to 18 instead of 11 (which will probably require
-        // changing the boolean to an enum).
-        self.close(true)?;
+        self.close(Some(if legacy {
+            SchemaVersion::V11
+        } else {
+            SchemaVersion::V18
+        }))?;

         export_collection_file(
             temp_colpkg.path(),
@@ -172,7 +174,7 @@ fn create_dummy_collection_file(tr: &I18n) -> Result<NamedTempFile> {
         .storage
         .db
         .execute_batch("pragma page_size=512; pragma journal_mode=delete; vacuum;")?;
-    dummy_col.close(true)?;
+    dummy_col.close(Some(SchemaVersion::V11))?;

     Ok(tempfile)
 }
@@ -253,35 +255,30 @@ fn write_media_files(
     media_entries: &mut Vec<MediaEntry>,
     mut progress_fn: impl FnMut(usize),
 ) -> Result<()> {
-    let mut writer = MediaFileWriter::new(meta);
-    let mut index = 0;
-    for entry in read_dir(dir)? {
-        let entry = entry?;
-        if !entry.metadata()?.is_file() {
-            continue;
-        }
+    let mut copier = MediaCopier::new(meta);
+    for (index, entry) in read_dir_files(dir)?.enumerate() {
         progress_fn(index);

         zip.start_file(index.to_string(), file_options_stored())?;

+        let entry = entry?;
         let name = normalized_unicode_file_name(&entry)?;
-        // FIXME: we should chunk this
-        let data = std::fs::read(entry.path())?;
-        let media_entry = make_media_entry(&data, name);
-        writer = writer.write(&mut std::io::Cursor::new(data), zip)?;
-        media_entries.push(media_entry);
-        // can't enumerate(), as we skip folders
-        index += 1;
+        let mut file = File::open(entry.path())?;
+
+        let (size, sha1) = copier.copy(&mut file, zip)?;
+        media_entries.push(MediaEntry::new(name, size, sha1));
     }

     Ok(())
 }

-fn make_media_entry(data: &[u8], name: String) -> MediaEntry {
-    MediaEntry {
-        name,
-        size: data.len() as u32,
-        sha1: sha1_of_data(data).to_vec(),
+impl MediaEntry {
+    fn new(name: impl Into<String>, size: impl TryInto<u32>, sha1: impl Into<Vec<u8>>) -> Self {
+        MediaEntry {
+            name: name.into(),
+            size: size.try_into().unwrap_or_default(),
+            sha1: sha1.into(),
+        }
     }
 }

@@ -298,29 +295,112 @@ fn normalized_unicode_file_name(entry: &DirEntry) -> Result<String> {
         .ok_or(AnkiError::MediaCheckRequired)
 }

-/// Writes media files while compressing according to the targeted version.
+/// Copies and hashes while encoding according to the targeted version.
+/// If compressing, the encoder is reused to optimize for repeated calls.
-struct MediaFileWriter(Option<RawEncoder<'static>>);
+struct MediaCopier {
+    encoding: bool,
+    encoder: Option<RawEncoder<'static>>,
+}

-impl MediaFileWriter {
+impl MediaCopier {
     fn new(meta: &Meta) -> Self {
-        Self(
-            meta.zstd_compressed()
-                .then(|| RawEncoder::with_dictionary(0, &[]).unwrap()),
-        )
+        Self {
+            encoding: meta.zstd_compressed(),
+            encoder: None,
+        }
     }

-    fn write(mut self, reader: &mut impl Read, writer: &mut impl Write) -> Result<Self> {
-        // take [self] by value to prevent it from being reused after an error
-        if let Some(encoder) = self.0.take() {
-            let mut encoder_writer = Writer::new(writer, encoder);
-            io::copy(reader, &mut encoder_writer)?;
-            encoder_writer.finish()?;
-            self.0 = Some(encoder_writer.into_inner().1);
-        } else {
-            io::copy(reader, writer)?;
+    fn encoder(&mut self) -> Option<RawEncoder<'static>> {
+        self.encoding.then(|| {
+            self.encoder
+                .take()
+                .unwrap_or_else(|| RawEncoder::with_dictionary(0, &[]).unwrap())
+        })
+    }
+
+    /// Returns size and sha1 hash of the copied data.
+    fn copy(
+        &mut self,
+        reader: &mut impl Read,
+        writer: &mut impl Write,
+    ) -> Result<(usize, [u8; 20])> {
+        let mut size = 0;
+        let mut hasher = Sha1::new();
+        let mut buf = [0; 64 * 1024];
+        let mut wrapped_writer = MaybeEncodedWriter::new(writer, self.encoder());
+
+        loop {
+            let count = match reader.read(&mut buf) {
+                Ok(0) => break,
+                Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
+                result => result?,
+            };
+            size += count;
+            hasher.update(&buf[..count]);
+            wrapped_writer.write(&buf[..count])?;
         }

-        Ok(self)
+        self.encoder = wrapped_writer.finish()?;
+
+        Ok((size, hasher.digest().bytes()))
     }
 }
+
+enum MaybeEncodedWriter<'a, W: Write> {
+    Stored(&'a mut W),
+    Encoded(zio::Writer<&'a mut W, RawEncoder<'static>>),
+}
+
+impl<'a, W: Write> MaybeEncodedWriter<'a, W> {
+    fn new(writer: &'a mut W, encoder: Option<RawEncoder<'static>>) -> Self {
+        if let Some(encoder) = encoder {
+            Self::Encoded(zio::Writer::new(writer, encoder))
+        } else {
+            Self::Stored(writer)
+        }
+    }
+
+    fn write(&mut self, buf: &[u8]) -> Result<()> {
+        match self {
+            Self::Stored(writer) => writer.write_all(buf)?,
+            Self::Encoded(writer) => writer.write_all(buf)?,
+        };
+        Ok(())
+    }
+
+    fn finish(self) -> Result<Option<RawEncoder<'static>>> {
+        Ok(match self {
+            Self::Stored(_) => None,
+            Self::Encoded(mut writer) => {
+                writer.finish()?;
+                Some(writer.into_inner().1)
+            }
+        })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::media::files::sha1_of_data;
+
+    #[test]
+    fn media_file_writing() {
+        let bytes = b"foo";
+        let bytes_hash = sha1_of_data(b"foo");
+
+        for meta in [Meta::new_legacy(), Meta::new()] {
+            let mut writer = MediaCopier::new(&meta);
+            let mut buf = Vec::new();
+
+            let (size, hash) = writer.copy(&mut bytes.as_slice(), &mut buf).unwrap();
+            if meta.zstd_compressed() {
+                buf = zstd::decode_all(buf.as_slice()).unwrap();
+            }
+
+            assert_eq!(buf, bytes);
+            assert_eq!(size, bytes.len());
+            assert_eq!(hash, bytes_hash);
+        }
+    }
+}
@@ -10,8 +10,7 @@ use std::{
 };

 use prost::Message;
-use tempfile::NamedTempFile;
-use zip::ZipArchive;
+use zip::{read::ZipFile, ZipArchive};
 use zstd::{self, stream::copy_decode};

 use super::super::Version;
@@ -22,7 +21,7 @@ use crate::{
         package::{MediaEntries, MediaEntry, Meta},
         ImportProgress,
     },
-    io::atomic_rename,
+    io::{atomic_rename, tempfile_in_parent_of},
     media::files::normalize_filename,
     prelude::*,
 };
@@ -58,15 +57,11 @@ pub fn import_colpkg(
     colpkg_path: &str,
     target_col_path: &str,
     target_media_folder: &str,
-    tr: &I18n,
     mut progress_fn: impl FnMut(ImportProgress) -> Result<()>,
 ) -> Result<()> {
     progress_fn(ImportProgress::Collection)?;
     let col_path = PathBuf::from(target_col_path);
-    let col_dir = col_path
-        .parent()
-        .ok_or_else(|| AnkiError::invalid_input("bad collection path"))?;
-    let mut tempfile = NamedTempFile::new_in(col_dir)?;
+    let mut tempfile = tempfile_in_parent_of(&col_path)?;

     let backup_file = File::open(colpkg_path)?;
     let mut archive = ZipArchive::new(backup_file)?;
@@ -78,17 +73,9 @@ pub fn import_colpkg(
     progress_fn(ImportProgress::Collection)?;

     let media_folder = Path::new(target_media_folder);
-    let media_import_result = restore_media(&meta, progress_fn, &mut archive, media_folder)
-        .map_err(|err| {
-            AnkiError::ImportError(ImportError::MediaImportFailed(
-                err.localized_description(tr),
-            ))
-        });
+    restore_media(&meta, progress_fn, &mut archive, media_folder)?;

-    // Proceed with replacing collection, regardless of media import result
-    atomic_rename(tempfile, &col_path)?;
-
-    media_import_result
+    atomic_rename(tempfile, &col_path)
 }

 fn check_collection(col_path: &Path) -> Result<()> {
@@ -113,48 +100,72 @@ fn restore_media(
 ) -> Result<()> {
     let media_entries = extract_media_entries(meta, archive)?;
     std::fs::create_dir_all(media_folder)?;
-    let mut count = 0;

     for (archive_file_name, entry) in media_entries.iter().enumerate() {
-        count += 1;
-        if count % 10 == 0 {
-            progress_fn(ImportProgress::Media(count))?;
+        if archive_file_name % 10 == 0 {
+            progress_fn(ImportProgress::Media(archive_file_name))?;
         }

         if let Ok(mut zip_file) = archive.by_name(&archive_file_name.to_string()) {
-            check_filename_safe(&entry.name)?;
-            let normalized = maybe_normalizing(&entry.name, meta.strict_media_checks())?;
-            let file_path = media_folder.join(normalized.as_ref());
-            let size_in_colpkg = if meta.media_list_is_hashmap() {
-                zip_file.size()
-            } else {
-                entry.size as u64
-            };
-            let files_are_equal = fs::metadata(&file_path)
-                .map(|metadata| metadata.len() == size_in_colpkg)
-                .unwrap_or_default();
-            if !files_are_equal {
-                // FIXME: write to temp file and atomic rename
-                let mut file = match File::create(&file_path) {
-                    Ok(file) => file,
-                    Err(err) => return Err(AnkiError::file_io_error(err, &file_path)),
-                };
-                if meta.zstd_compressed() {
-                    copy_decode(&mut zip_file, &mut file)
-                } else {
-                    io::copy(&mut zip_file, &mut file).map(|_| ())
-                }
-                .map_err(|err| AnkiError::file_io_error(err, &file_path))?;
-            }
+            maybe_restore_media_file(meta, media_folder, entry, &mut zip_file)?;
         } else {
             return Err(AnkiError::invalid_input(&format!(
                 "{archive_file_name} missing from archive"
             )));
         }
     }

     Ok(())
 }

+fn maybe_restore_media_file(
+    meta: &Meta,
+    media_folder: &Path,
+    entry: &MediaEntry,
+    zip_file: &mut ZipFile,
+) -> Result<()> {
+    let file_path = entry.safe_normalized_file_path(meta, media_folder)?;
+    let already_exists = entry.is_equal_to(meta, zip_file, &file_path);
+    if !already_exists {
+        restore_media_file(meta, zip_file, &file_path)?;
+    };
+
+    Ok(())
+}
+
+fn restore_media_file(meta: &Meta, zip_file: &mut ZipFile, path: &Path) -> Result<()> {
+    let mut tempfile = tempfile_in_parent_of(path)?;
+
+    if meta.zstd_compressed() {
+        copy_decode(zip_file, &mut tempfile)
+    } else {
+        io::copy(zip_file, &mut tempfile).map(|_| ())
+    }
+    .map_err(|err| AnkiError::file_io_error(err, path))?;
+
+    atomic_rename(tempfile, path)
+}
+
+impl MediaEntry {
+    fn safe_normalized_file_path(&self, meta: &Meta, media_folder: &Path) -> Result<PathBuf> {
+        check_filename_safe(&self.name)?;
+        let normalized = maybe_normalizing(&self.name, meta.strict_media_checks())?;
+        Ok(media_folder.join(normalized.as_ref()))
+    }
+
+    fn is_equal_to(&self, meta: &Meta, self_zipped: &ZipFile, other_path: &Path) -> bool {
+        // TODO: checks hashs (https://github.com/ankitects/anki/pull/1723#discussion_r829653147)
+        let self_size = if meta.media_list_is_hashmap() {
+            self_zipped.size()
+        } else {
+            self.size as u64
+        };
+        fs::metadata(other_path)
+            .map(|metadata| metadata.len() as u64 == self_size)
+            .unwrap_or_default()
+    }
+}
+
 /// - If strict is true, return an error if not normalized.
 /// - If false, return the normalized version.
 fn maybe_normalizing(name: &str, strict: bool) -> Result<Cow<str>> {
@@ -40,7 +40,6 @@ fn roundtrip() -> Result<()> {
     for (legacy, name) in [(true, "legacy"), (false, "v3")] {
         // export to a file
         let col = collection_with_media(dir, name)?;
-        let tr = col.tr.clone();
         let colpkg_name = dir.join(format!("{name}.colpkg"));
         col.export_colpkg(&colpkg_name, true, legacy, |_| ())?;
         // import into a new collection
@@ -53,7 +52,6 @@ fn roundtrip() -> Result<()> {
             &colpkg_name.to_string_lossy(),
             &anki2_name,
             import_media_dir.to_str().unwrap(),
-            &tr,
             |_| Ok(()),
         )?;
         // confirm collection imported
@@ -7,6 +7,13 @@ use tempfile::NamedTempFile;

 use crate::prelude::*;

+pub(crate) fn tempfile_in_parent_of(file: &Path) -> Result<NamedTempFile> {
+    let dir = file
+        .parent()
+        .ok_or_else(|| AnkiError::invalid_input("not a file path"))?;
+    NamedTempFile::new_in(dir).map_err(|err| AnkiError::file_io_error(err, dir))
+}
+
 pub(crate) fn atomic_rename(file: NamedTempFile, target: &Path) -> Result<()> {
     file.as_file().sync_all()?;
     file.persist(&target)
@@ -20,3 +27,27 @@ pub(crate) fn atomic_rename(file: NamedTempFile, target: &Path) -> Result<()> {
     }
     Ok(())
 }
+
+/// Like [std::fs::read_dir], but only yielding files. [Err]s are not filtered.
+pub(crate) fn read_dir_files(path: impl AsRef<Path>) -> std::io::Result<ReadDirFiles> {
+    std::fs::read_dir(path).map(ReadDirFiles)
+}
+
+pub(crate) struct ReadDirFiles(std::fs::ReadDir);
+
+impl Iterator for ReadDirFiles {
+    type Item = std::io::Result<std::fs::DirEntry>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let next = self.0.next();
+        if let Some(Ok(entry)) = next.as_ref() {
+            match entry.metadata().map(|metadata| metadata.is_file()) {
+                Ok(true) => next,
+                Ok(false) => self.next(),
+                Err(error) => Some(Err(error)),
+            }
+        } else {
+            next
+        }
+    }
+}
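The two helpers added above are what the later hunks build on: a temp file is created next to its final destination, filled, then swapped into place, and `read_dir_files` hides the "skip subfolders" bookkeeping. A minimal usage sketch follows; it is illustrative only and not part of the commit, and the function names and target path are made up. It assumes these `crate::io` helpers and that `AnkiError` converts from `std::io::Error` (as the surrounding diff relies on).

use std::io::Write;
use std::path::Path;

use crate::io::{atomic_rename, read_dir_files, tempfile_in_parent_of};
use crate::prelude::*;

// Write `data` to `target` without ever exposing a half-written file:
// the temp file lives in the same directory, so the final rename is atomic.
fn write_file_atomically(target: &Path, data: &[u8]) -> Result<()> {
    let mut tempfile = tempfile_in_parent_of(target)?;
    tempfile.write_all(data)?;
    atomic_rename(tempfile, target)
}

// Count regular files in a folder; ReadDirFiles skips subdirectories for us.
fn media_file_count(folder: &Path) -> Result<usize> {
    let mut count = 0;
    for entry in read_dir_files(folder)? {
        let _entry = entry?;
        count += 1;
    }
    Ok(count)
}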
@@ -281,7 +281,7 @@ fn existing_file_sha1(path: &Path) -> io::Result<Option<[u8; 20]>> {
 }

 /// Return the SHA1 of a file, failing if it doesn't exist.
-pub(super) fn sha1_of_file(path: &Path) -> io::Result<[u8; 20]> {
+pub(crate) fn sha1_of_file(path: &Path) -> io::Result<[u8; 20]> {
     let mut file = fs::File::open(path)?;
     let mut hasher = Sha1::new();
     let mut buf = [0; 64 * 1024];
@@ -21,6 +21,18 @@ use std::fmt::Write;
 pub(crate) use sqlite::SqliteStorage;
 pub(crate) use sync::open_and_check_sqlite_file;

+#[derive(Debug, Clone, Copy, PartialEq)]
+pub(crate) enum SchemaVersion {
+    V11,
+    V18,
+}
+
+impl SchemaVersion {
+    pub(super) fn has_journal_mode_delete(self) -> bool {
+        self == Self::V11
+    }
+}
+
 /// Write a list of IDs as '(x,y,...)' into the provided string.
 pub(crate) fn ids_to_string<T>(buf: &mut String, ids: &[T])
 where
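With the enum in place, callers state which schema they want on close instead of passing a bare bool. An illustrative sketch of the resulting call sites follows; it is not code from the commit, and `col` plus the helper names are hypothetical, but the `close()` signatures match the collection and storage hunks in this diff.

use crate::collection::Collection;
use crate::prelude::*;
use crate::storage::SchemaVersion;

// A legacy .colpkg export needs the schema 11 downgrade (and journal_mode=delete);
// a v3 export keeps schema 18; a plain close leaves the schema untouched.
fn close_for_export(col: Collection, legacy: bool) -> Result<()> {
    col.close(Some(if legacy {
        SchemaVersion::V11
    } else {
        SchemaVersion::V18
    }))
}

fn close_without_downgrade(col: Collection) -> Result<()> {
    col.close(None)
}

Tying the journal-mode pragma to `has_journal_mode_delete()` keeps that side effect bound to the schema 11 downgrade rather than to an unrelated boolean flag.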
@@ -8,7 +8,10 @@ use regex::Regex;
 use rusqlite::{functions::FunctionFlags, params, Connection};
 use unicase::UniCase;

-use super::upgrades::{SCHEMA_MAX_VERSION, SCHEMA_MIN_VERSION, SCHEMA_STARTING_VERSION};
+use super::{
+    upgrades::{SCHEMA_MAX_VERSION, SCHEMA_MIN_VERSION, SCHEMA_STARTING_VERSION},
+    SchemaVersion,
+};
 use crate::{
     config::schema11::schema11_config_as_string,
     error::{AnkiError, DbErrorKind, Result},
@@ -261,10 +264,12 @@ impl SqliteStorage {
         Ok(storage)
     }

-    pub(crate) fn close(self, downgrade: bool) -> Result<()> {
-        if downgrade {
-            self.downgrade_to_schema_11()?;
-            self.db.pragma_update(None, "journal_mode", &"delete")?;
+    pub(crate) fn close(self, desired_version: Option<SchemaVersion>) -> Result<()> {
+        if let Some(version) = desired_version {
+            self.downgrade_to(version)?;
+            if version.has_journal_mode_delete() {
+                self.db.pragma_update(None, "journal_mode", &"delete")?;
+            }
         }
         Ok(())
     }
@@ -8,7 +8,7 @@ pub(super) const SCHEMA_STARTING_VERSION: u8 = 11;
 /// The maximum schema version we can open.
 pub(super) const SCHEMA_MAX_VERSION: u8 = 18;

-use super::SqliteStorage;
+use super::{SchemaVersion, SqliteStorage};
 use crate::error::Result;

 impl SqliteStorage {
@@ -48,7 +48,14 @@ impl SqliteStorage {
         Ok(())
     }

-    pub(super) fn downgrade_to_schema_11(&self) -> Result<()> {
+    pub(super) fn downgrade_to(&self, ver: SchemaVersion) -> Result<()> {
+        match ver {
+            SchemaVersion::V11 => self.downgrade_to_schema_11(),
+            SchemaVersion::V18 => Ok(()),
+        }
+    }
+
+    fn downgrade_to_schema_11(&self) -> Result<()> {
         self.begin_trx()?;

         self.db
@@ -66,3 +73,17 @@ impl SqliteStorage {
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    #[allow(clippy::assertions_on_constants)]
+    fn assert_18_is_latest_schema_version() {
+        assert!(
+            18 == SCHEMA_MAX_VERSION,
+            "must implement SqliteStorage::downgrade_to(SchemaVersion::V18)"
+        );
+    }
+}
@@ -29,7 +29,7 @@ use crate::{
     serde::{default_on_invalid, deserialize_int_from_number},
     storage::{
         card::data::{card_data_string, original_position_from_card_data},
-        open_and_check_sqlite_file,
+        open_and_check_sqlite_file, SchemaVersion,
     },
     tags::{join_tags, split_tags, Tag},
 };
@@ -654,7 +654,7 @@ impl Collection {
     pub(crate) async fn full_upload_inner(mut self, server: Box<dyn SyncServer>) -> Result<()> {
         self.before_upload()?;
         let col_path = self.col_path.clone();
-        self.close(true)?;
+        self.close(Some(SchemaVersion::V11))?;
         server.full_upload(&col_path, false).await
     }

@@ -674,7 +674,7 @@ impl Collection {
         let col_folder = col_path
             .parent()
             .ok_or_else(|| AnkiError::invalid_input("couldn't get col_folder"))?;
-        self.close(false)?;
+        self.close(None)?;
         let out_file = server.full_download(Some(col_folder)).await?;
         // check file ok
         let db = open_and_check_sqlite_file(out_file.path())?;
@@ -10,7 +10,7 @@ use super::ChunkableIds;
 use crate::{
     collection::CollectionBuilder,
     prelude::*,
-    storage::open_and_check_sqlite_file,
+    storage::{open_and_check_sqlite_file, SchemaVersion},
     sync::{
         Chunk, Graves, SanityCheckCounts, SanityCheckResponse, SanityCheckStatus, SyncMeta,
         UnchunkedChanges, Usn,
@@ -207,7 +207,7 @@ impl SyncServer for LocalServer {
         })?;

         let target_col_path = self.col.col_path.clone();
-        self.col.close(false)?;
+        self.col.close(None)?;
         fs::rename(col_path, &target_col_path).map_err(Into::into)
     }

@@ -221,7 +221,7 @@ impl SyncServer for LocalServer {
         self.col
             .transact_no_undo(|col| col.storage.increment_usn())?;
         let col_path = self.col.col_path.clone();
-        self.col.close(true)?;
+        self.col.close(Some(SchemaVersion::V11))?;

         // copy file and return path
         let temp_file = NamedTempFile::new()?;