mirror of
https://github.com/ankitects/anki.git
synced 2025-09-20 06:52:21 -04:00

The saved characters weren't worth the increased difficulty when reading, and the fact that we were deviating from protobuf norms.
109 lines · 3.5 KiB · Rust
// Copyright: Ankitects Pty Ltd and contributors
|
|
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
|
|
|
use slog::error;
|
|
|
|
use super::{progress::Progress, Backend};
|
|
pub(super) use crate::backend_proto::collection_service::Service as CollectionService;
|
|
use crate::{
|
|
backend::progress::progress_to_proto,
|
|
backend_proto as pb,
|
|
collection::open_collection,
|
|
log::{self, default_logger},
|
|
prelude::*,
|
|
};
|
|
|
|
impl CollectionService for Backend {
|
|
fn latest_progress(&self, _input: pb::Empty) -> Result<pb::Progress> {
|
|
let progress = self.progress_state.lock().unwrap().last_progress;
|
|
Ok(progress_to_proto(progress, &self.tr))
|
|
}
|
|
|
|
fn set_wants_abort(&self, _input: pb::Empty) -> Result<pb::Empty> {
|
|
self.progress_state.lock().unwrap().want_abort = true;
|
|
Ok(().into())
|
|
}
|
|
|
|
fn open_collection(&self, input: pb::OpenCollectionRequest) -> Result<pb::Empty> {
|
|
let mut col = self.col.lock().unwrap();
|
|
if col.is_some() {
|
|
return Err(AnkiError::CollectionAlreadyOpen);
|
|
}
|
|
|
|
let mut path = input.collection_path.clone();
|
|
path.push_str(".log");
|
|
|
|
let log_path = match input.log_path.as_str() {
|
|
"" => None,
|
|
path => Some(path),
|
|
};
|
|
let logger = default_logger(log_path)?;
|
|
|
|
let new_col = open_collection(
|
|
input.collection_path,
|
|
input.media_folder_path,
|
|
input.media_db_path,
|
|
self.server,
|
|
self.tr.clone(),
|
|
logger,
|
|
)?;
|
|
|
|
*col = Some(new_col);
|
|
|
|
Ok(().into())
|
|
}
|
|
|
|
fn close_collection(&self, input: pb::CloseCollectionRequest) -> Result<pb::Empty> {
|
|
self.abort_media_sync_and_wait();
|
|
|
|
let mut col = self.col.lock().unwrap();
|
|
if col.is_none() {
|
|
return Err(AnkiError::CollectionNotOpen);
|
|
}
|
|
|
|
let col_inner = col.take().unwrap();
|
|
if input.downgrade_to_schema11 {
|
|
let log = log::terminal();
|
|
if let Err(e) = col_inner.close(input.downgrade_to_schema11) {
|
|
error!(log, " failed: {:?}", e);
|
|
}
|
|
}
|
|
|
|
Ok(().into())
|
|
}
|
|
|
|
fn check_database(&self, _input: pb::Empty) -> Result<pb::CheckDatabaseResponse> {
|
|
let mut handler = self.new_progress_handler();
|
|
let progress_fn = move |progress, throttle| {
|
|
handler.update(Progress::DatabaseCheck(progress), throttle);
|
|
};
|
|
self.with_col(|col| {
|
|
col.check_database(progress_fn)
|
|
.map(|problems| pb::CheckDatabaseResponse {
|
|
problems: problems.to_i18n_strings(&col.tr),
|
|
})
|
|
})
|
|
}
|
|
|
|
fn get_undo_status(&self, _input: pb::Empty) -> Result<pb::UndoStatus> {
|
|
self.with_col(|col| Ok(col.undo_status().into_protobuf(&col.tr)))
|
|
}
|
|
|
|
fn undo(&self, _input: pb::Empty) -> Result<pb::OpChangesAfterUndo> {
|
|
self.with_col(|col| col.undo().map(|out| out.into_protobuf(&col.tr)))
|
|
}
|
|
|
|
fn redo(&self, _input: pb::Empty) -> Result<pb::OpChangesAfterUndo> {
|
|
self.with_col(|col| col.redo().map(|out| out.into_protobuf(&col.tr)))
|
|
}
|
|
|
|
fn add_custom_undo_entry(&self, input: pb::String) -> Result<pb::UInt32> {
|
|
self.with_col(|col| Ok(col.add_custom_undo_step(input.val).into()))
|
|
}
|
|
|
|
fn merge_undo_entries(&self, input: pb::UInt32) -> Result<pb::OpChanges> {
|
|
let starting_from = input.val as usize;
|
|
self.with_col(|col| col.merge_undoable_ops(starting_from))
|
|
.map(Into::into)
|
|
}
|
|
}
|