split deck config into separate SQL table

- on collection load, the schema is upgraded to 12
- on collection close, the changes are reversed so older clients
  can continue to open the collection (see the sketch below)
- in the future, we could potentially skip the reversal except
  when exporting/doing a full sync
- the same approach should work for decks, note types and tags in the
  future too
- the deck list code needs updating to cache the deck confs for the
  life of the call
Damien Elmes 2020-03-30 20:01:16 +10:00
parent 004cc2b5f8
commit 35c03af520
17 changed files with 273 additions and 94 deletions
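
To make the bullets above concrete, here is a minimal, self-contained sketch of the load/close cycle using rusqlite (the same SQLite wrapper the Rust diffs below use, of the same NO_PARAMS vintage). The deck_config table and the ver values are taken from the diffs; the function names and the stripped-down col table are invented for illustration, and the real upgrade also moves the dconf JSON out of the col table rather than discarding it:

use rusqlite::{Connection, NO_PARAMS};

fn upgrade_to_schema_12(db: &Connection) -> rusqlite::Result<()> {
    // the real code also splits col.dconf's JSON into one row per config
    db.execute_batch(
        "create table deck_config (
            id integer primary key not null,
            name text not null,
            mtime_secs integer not null,
            usn integer not null,
            config text not null
        );
        update col set ver = 12;",
    )
}

fn downgrade_to_schema_11(db: &Connection) -> rusqlite::Result<()> {
    // the real code first writes the rows back into col.dconf
    db.execute_batch("drop table deck_config; update col set ver = 11;")
}

fn main() -> rusqlite::Result<()> {
    let db = Connection::open_in_memory()?;
    db.execute_batch("create table col (ver integer not null); insert into col values (11);")?;
    upgrade_to_schema_12(&db)?; // on collection open
    // ... normal operation reads/writes deck_config ...
    downgrade_to_schema_11(&db)?; // on close, so schema-11 clients still work
    let ver: i64 = db.query_row("select ver from col", NO_PARAMS, |r| r.get(0))?;
    assert_eq!(ver, 11);
    Ok(())
}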

=== backend proto ===

@@ -46,7 +46,7 @@ message BackendInput {
         Empty empty_trash = 34;
         Empty restore_trash = 35;
         OpenCollectionIn open_collection = 36;
-        Empty close_collection = 37;
+        CloseCollectionIn close_collection = 37;
         int64 get_card = 38;
         Card update_card = 39;
         Card add_card = 40;
@@ -408,3 +408,7 @@ message Card {
     uint32 flags = 17;
     string data = 18;
 }
+
+message CloseCollectionIn {
+    bool downgrade_to_schema11 = 1;
+}

=== python: decks ===

@@ -44,7 +44,7 @@ defaultDynamicDeck = {
     "desc": "",
     "usn": 0,
     "delays": None,
-    "separate": True,
+    "separate": True,  # unused
     # list of (search, limit, order); we only use first two elements for now
     "terms": [["", 100, 0]],
     "resched": True,
@@ -59,7 +59,7 @@ defaultConf = {
     "delays": [1, 10],
     "ints": [1, 4, 7],  # 7 is not currently used
     "initialFactor": STARTING_FACTOR,
-    "separate": True,
+    "separate": True,  # unused
     "order": NEW_CARDS_DUE,
     "perDay": 20,
     # may not be set on old decks
@@ -358,7 +358,7 @@ class DeckManager:
     def allConf(self) -> List:
         "A list of all deck config."
-        return list(self.col.backend.all_deck_config().values())
+        return list(self.col.backend.all_deck_config())
 
     def confForDid(self, did: int) -> Any:
         deck = self.get(did, default=False)

=== python: rust backend bridge ===

@@ -261,9 +261,12 @@ class RustBackend:
             release_gil=True,
         )
 
-    def close_collection(self):
+    def close_collection(self, downgrade=True):
         self._run_command(
-            pb.BackendInput(close_collection=pb.Empty()), release_gil=True
+            pb.BackendInput(
+                close_collection=pb.CloseCollectionIn(downgrade_to_schema11=downgrade)
+            ),
+            release_gil=True,
         )
 
     def template_requirements(
@@ -501,7 +504,7 @@
         ).add_or_update_deck_config
         conf["id"] = id
 
-    def all_deck_config(self) -> Dict[int, Dict[str, Any]]:
+    def all_deck_config(self) -> Sequence[Dict[str, Any]]:
         jstr = self._run_command(
             pb.BackendInput(all_deck_config=pb.Empty())
         ).all_deck_config

=== python: scheduler ===

@@ -834,7 +834,6 @@ did = ?, queue = %s, due = ?, usn = ? where id = ?"""
             bury=oconf["new"].get("bury", True),
             # overrides
             delays=delays,
-            separate=conf["separate"],
             order=NEW_CARDS_DUE,
             perDay=self.reportLimit,
         )

=== python: scheduler ===

@@ -1307,7 +1307,6 @@ where id = ?
             bury=oconf["new"].get("bury", True),
             delays=oconf["new"]["delays"],
             # overrides
-            separate=conf["separate"],
             order=NEW_CARDS_DUE,
             perDay=self.reportLimit,
         )

=== python: initial collection setup ===

@@ -78,7 +78,7 @@ def initial_db_setup(db: DBProxy) -> None:
     _addColVars(db, *_getColVars(db))
 
 
-def _getColVars(db: DBProxy) -> Tuple[Any, Any, Dict[str, Any]]:
+def _getColVars(db: DBProxy) -> Tuple[Any, Dict[str, Any]]:
     import anki.collection
     import anki.decks
@@ -87,18 +87,13 @@ def _getColVars(db: DBProxy) -> Tuple[Any, Any, Dict[str, Any]]:
     g["name"] = _("Default")
     g["conf"] = 1
     g["mod"] = intTime()
-    gc = copy.deepcopy(anki.decks.defaultConf)
-    gc["id"] = 1
-    return g, gc, anki.collection.defaultConf.copy()
+    return g, anki.collection.defaultConf.copy()
 
 
-def _addColVars(
-    db: DBProxy, g: Dict[str, Any], gc: Dict[str, Any], c: Dict[str, Any]
-) -> None:
+def _addColVars(db: DBProxy, g: Dict[str, Any], c: Dict[str, Any]) -> None:
     db.execute(
         """
-update col set conf = ?, decks = ?, dconf = ?""",
+update col set conf = ?, decks = ?""",
         json.dumps(c),
         json.dumps({"1": g}),
-        json.dumps({"1": gc}),
     )

=== rust: backend ===

@@ -30,6 +30,7 @@ use crate::timestamp::TimestampSecs;
 use crate::types::Usn;
 use crate::{backend_proto as pb, log};
 use fluent::FluentValue;
+use log::error;
 use prost::Message;
 use std::collections::{HashMap, HashSet};
 use std::convert::TryFrom;
@@ -251,8 +252,8 @@ impl Backend {
                 self.open_collection(input)?;
                 OValue::OpenCollection(Empty {})
             }
-            Value::CloseCollection(_) => {
-                self.close_collection()?;
+            Value::CloseCollection(input) => {
+                self.close_collection(input.downgrade_to_schema11)?;
                 OValue::CloseCollection(Empty {})
             }
             Value::SearchCards(input) => OValue::SearchCards(self.search_cards(input)?),
@@ -305,7 +306,7 @@ impl Backend {
         Ok(())
     }
 
-    fn close_collection(&self) -> Result<()> {
+    fn close_collection(&self, downgrade: bool) -> Result<()> {
         let mut col = self.col.lock().unwrap();
         if col.is_none() {
             return Err(AnkiError::CollectionNotOpen);
@@ -315,7 +316,13 @@
             return Err(AnkiError::invalid_input("can't close yet"));
         }
 
-        *col = None;
+        let col_inner = col.take().unwrap();
+        if downgrade {
+            let log = log::terminal();
+            if let Err(e) = col_inner.downgrade_and_close() {
+                error!(log, " failed: {:?}", e);
+            }
+        }
 
         Ok(())
     }
@@ -683,7 +690,7 @@ impl Backend {
     fn all_deck_config(&self) -> Result<String> {
         self.with_col(|col| {
-            serde_json::to_string(&col.storage.all_deck_conf()?).map_err(Into::into)
+            serde_json::to_string(&col.storage.all_deck_config()?).map_err(Into::into)
         })
     }

=== rust: collection ===

@@ -130,6 +130,10 @@ impl Collection {
         self.state.task_state == CollectionTaskState::Normal
     }
 
+    pub(crate) fn downgrade_and_close(self) -> Result<()> {
+        self.storage.downgrade_to_schema_11()
+    }
+
     pub fn timing_today(&mut self) -> Result<SchedTimingToday> {
         if let Some(timing) = &self.state.timing_today {
             if timing.next_day_at > TimestampSecs::now().0 {

=== rust: deckconf ===

@@ -55,6 +55,10 @@ pub struct NewConf {
     #[serde(deserialize_with = "default_on_invalid")]
     pub(crate) per_day: u32,
+
+    // unused, can remove in the future
+    #[serde(default)]
+    separate: bool,
 
     #[serde(flatten)]
     other: HashMap<String, Value>,
 }
@@ -161,6 +165,7 @@ impl Default for NewConf {
             ints: NewCardIntervals::default(),
             order: NewCardOrder::default(),
             per_day: 20,
+            separate: true,
             other: Default::default(),
         }
     }
@@ -200,35 +205,32 @@ impl Default for DeckConf {
 impl Collection {
     pub fn get_deck_config(&self, dcid: DeckConfID, fallback: bool) -> Result<Option<DeckConf>> {
-        let conf = self.storage.all_deck_conf()?;
-        if let Some(conf) = conf.get(&dcid) {
-            return Ok(Some(conf.clone()));
+        if let Some(conf) = self.storage.get_deck_config(dcid)? {
+            return Ok(Some(conf));
         }
 
         if fallback {
-            if let Some(conf) = conf.get(&DeckConfID(1)) {
-                return Ok(Some(conf.clone()));
+            if let Some(conf) = self.storage.get_deck_config(DeckConfID(1))? {
+                return Ok(Some(conf));
             }
             // if even the default deck config is missing, just return the defaults
-            return Ok(Some(DeckConf::default()));
+            Ok(Some(DeckConf::default()))
+        } else {
+            Ok(None)
         }
-
-        Ok(None)
     }
 
     pub(crate) fn add_or_update_deck_config(&self, conf: &mut DeckConf) -> Result<()> {
-        let mut allconf = self.storage.all_deck_conf()?;
-        if conf.id.0 == 0 {
-            conf.id.0 = TimestampMillis::now().0;
-            loop {
-                if !allconf.contains_key(&conf.id) {
-                    break;
-                }
-                conf.id.0 += 1;
-            }
-        }
         conf.mtime = TimestampSecs::now();
         conf.usn = self.usn()?;
-        allconf.insert(conf.id, conf.clone());
-        self.storage.flush_deck_conf(&allconf)
+
+        let orig = self.storage.get_deck_config(conf.id)?;
+        if let Some(_orig) = orig {
+            self.storage.update_deck_conf(&conf)
+        } else {
+            if conf.id.0 == 0 {
+                conf.id.0 = TimestampMillis::now().0;
+            }
+            self.storage.add_deck_conf(conf)
+        }
     }
 
     pub(crate) fn remove_deck_config(&self, dcid: DeckConfID) -> Result<()> {
@@ -236,8 +238,6 @@ impl Collection {
             return Err(AnkiError::invalid_input("can't delete default conf"));
         }
         self.ensure_schema_modified()?;
-        let mut allconf = self.storage.all_deck_conf()?;
-        allconf.remove(&dcid);
-        self.storage.flush_deck_conf(&allconf)
+        self.storage.remove_deck_conf(dcid)
     }
 }

=== rust: old deck config storage (file removed) ===

@@ -1,29 +0,0 @@
-// Copyright: Ankitects Pty Ltd and contributors
-// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
-
-use super::SqliteStorage;
-use crate::{
-    deckconf::{DeckConf, DeckConfID},
-    err::{AnkiError, Result},
-};
-use rusqlite::{params, NO_PARAMS};
-use std::collections::HashMap;
-
-impl SqliteStorage {
-    pub(crate) fn all_deck_conf(&self) -> Result<HashMap<DeckConfID, DeckConf>> {
-        self.db
-            .prepare_cached("select dconf from col")?
-            .query_and_then(NO_PARAMS, |row| -> Result<_> {
-                Ok(serde_json::from_str(row.get_raw(0).as_str()?)?)
-            })?
-            .next()
-            .ok_or_else(|| AnkiError::invalid_input("no col table"))?
-    }
-
-    pub(crate) fn flush_deck_conf(&self, conf: &HashMap<DeckConfID, DeckConf>) -> Result<()> {
-        self.db
-            .prepare_cached("update col set dconf = ?")?
-            .execute(params![&serde_json::to_string(conf)?])?;
-        Ok(())
-    }
-}

=== add.sql (new file) ===

@@ -0,0 +1,22 @@
+insert into deck_config (id, name, mtime_secs, usn, config)
+values
+  (
+    (
+      case
+        when ?1 in (
+          select
+            id
+          from deck_config
+        ) then (
+          select
+            max(id) + 1
+          from deck_config
+        )
+        else ?1
+      end
+    ),
+    ?,
+    ?,
+    ?,
+    ?
+  );
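
The case expression above is the ID-collision guard: if the requested ?1 is already taken, the row is inserted under max(id) + 1 instead, and add_deck_conf (in the Rust file below) then rewrites the JSON copy of the ID from last_insert_rowid(). A self-contained sketch of just this SQL behaviour, using numbered placeholders for clarity (an illustration, not the commit's code; assumes a rusqlite version of the same vintage as the diffs):

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let db = Connection::open_in_memory()?;
    db.execute_batch(
        "create table deck_config (
            id integer primary key not null,
            name text not null,
            mtime_secs integer not null,
            usn integer not null,
            config text not null
        );",
    )?;
    let add = "insert into deck_config (id, name, mtime_secs, usn, config)
               values ((case when ?1 in (select id from deck_config)
                             then (select max(id) + 1 from deck_config)
                             else ?1 end), ?2, ?3, ?4, ?5)";
    // the first insert gets the requested id
    db.execute(add, params![1, "Default", 0, 0, "{}"])?;
    assert_eq!(db.last_insert_rowid(), 1);
    // a second insert asking for the same id is bumped to max(id) + 1
    db.execute(add, params![1, "Copy", 0, 0, "{}"])?;
    assert_eq!(db.last_insert_rowid(), 2);
    Ok(())
}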

=== get.sql (new file) ===

@@ -0,0 +1,5 @@
+select
+  config
+from deck_config
+where
+  id = ?;

=== rust: deck config storage (new file) ===

@@ -0,0 +1,106 @@
+// Copyright: Ankitects Pty Ltd and contributors
+// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+
+use super::SqliteStorage;
+use crate::{
+    deckconf::{DeckConf, DeckConfID},
+    err::Result,
+};
+use rusqlite::{params, NO_PARAMS};
+use std::collections::HashMap;
+
+impl SqliteStorage {
+    pub(crate) fn all_deck_config(&self) -> Result<Vec<DeckConf>> {
+        self.db
+            .prepare_cached("select config from deck_config")?
+            .query_and_then(NO_PARAMS, |row| -> Result<_> {
+                Ok(serde_json::from_str(row.get_raw(0).as_str()?)?)
+            })?
+            .collect()
+    }
+
+    pub(crate) fn get_deck_config(&self, dcid: DeckConfID) -> Result<Option<DeckConf>> {
+        self.db
+            .prepare_cached(include_str!("get.sql"))?
+            .query_and_then(params![dcid], |row| -> Result<_> {
+                Ok(serde_json::from_str(row.get_raw(0).as_str()?)?)
+            })?
+            .next()
+            .transpose()
+    }
+
+    pub(crate) fn add_deck_conf(&self, conf: &mut DeckConf) -> Result<()> {
+        self.db
+            .prepare_cached(include_str!("add.sql"))?
+            .execute(params![
+                conf.id,
+                conf.name,
+                conf.mtime,
+                conf.usn,
+                &serde_json::to_string(conf)?,
+            ])?;
+        let id = self.db.last_insert_rowid();
+        if conf.id.0 != id {
+            // if the initial ID conflicted, make sure the json is up to date
+            // as well
+            conf.id.0 = id;
+            self.update_deck_conf(conf)?;
+        }
+        Ok(())
+    }
+
+    pub(crate) fn update_deck_conf(&self, conf: &DeckConf) -> Result<()> {
+        self.db
+            .prepare_cached(include_str!("update.sql"))?
+            .execute(params![
+                conf.name,
+                conf.mtime,
+                conf.usn,
+                &serde_json::to_string(conf)?,
+                conf.id,
+            ])?;
+        Ok(())
+    }
+
+    pub(crate) fn remove_deck_conf(&self, dcid: DeckConfID) -> Result<()> {
+        self.db
+            .prepare_cached("delete from deck_config where id=?")?
+            .execute(params![dcid])?;
+        Ok(())
+    }
+
+    // Creating/upgrading/downgrading
+    ////////////////////////////////////
+
+    pub(super) fn add_default_deck_config(&self) -> Result<()> {
+        let mut conf = DeckConf::default();
+        conf.id.0 = 1;
+        self.add_deck_conf(&mut conf)
+    }
+
+    pub(super) fn upgrade_deck_conf_to_schema12(&self) -> Result<()> {
+        let conf = self
+            .db
+            .query_row_and_then("select dconf from col", NO_PARAMS, |row| {
+                let conf: Result<HashMap<DeckConfID, DeckConf>> =
+                    serde_json::from_str(row.get_raw(0).as_str()?).map_err(Into::into);
+                conf
+            })?;
+        for (_, mut conf) in conf.into_iter() {
+            self.add_deck_conf(&mut conf)?;
+        }
+        self.db.execute_batch("update col set dconf=''")?;
+        Ok(())
+    }
+
+    pub(super) fn downgrade_deck_conf_from_schema12(&self) -> Result<()> {
+        let allconf = self.all_deck_config()?;
+        let confmap: HashMap<DeckConfID, DeckConf> =
+            allconf.into_iter().map(|c| (c.id, c)).collect();
+        self.db.execute(
+            "update col set dconf=?",
+            params![serde_json::to_string(&confmap)?],
+        )?;
+        Ok(())
+    }
+}
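
A side note on get_deck_config above: query_and_then returns an iterator of Result<DeckConf>, so .next() yields Option<Result<_>>, and Option::transpose converts that into the Result<Option<_>> the caller needs, mapping "no rows" to Ok(None) while still surfacing decode errors. In isolation:

fn main() {
    // no rows: Option<Result<T>> is None, and transpose gives Ok(None)
    let rows: Vec<Result<i32, String>> = vec![];
    assert_eq!(rows.into_iter().next().transpose(), Ok(None));

    // one row: Some(Ok(42)) becomes Ok(Some(42))
    let rows = vec![Ok::<i32, String>(42)];
    assert_eq!(rows.into_iter().next().transpose(), Ok(Some(42)));

    // a decode error surfaces as Err rather than being silently dropped
    let rows = vec![Err::<i32, String>("bad json".into())];
    assert_eq!(rows.into_iter().next().transpose(), Err("bad json".into()));
}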

=== update.sql (new file) ===

@@ -0,0 +1,8 @@
+update deck_config
+set
+  name = ?,
+  mtime_secs = ?,
+  usn = ?,
+  config = ?
+where
+  id = ?;

=== schema12_downgrade.sql (new file) ===

@@ -0,0 +1,4 @@
+drop table deck_config;
+update col
+set
+  ver = 11;

=== schema12_upgrade.sql (new file) ===

@@ -0,0 +1,10 @@
+create table deck_config (
+  id integer primary key not null,
+  name text not null collate unicase,
+  mtime_secs integer not null,
+  usn integer not null,
+  config text not null
+);
+update col
+set
+  ver = 12;
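
Note the collate unicase on the name column: unicase is not a built-in SQLite collation, and the sqlite.rs hunk below shows the matching unicase_compare helper. A minimal sketch of registering such a collation through rusqlite (this assumes rusqlite's collation feature; the wiring here is illustrative, not the commit's actual setup code):

use rusqlite::{Connection, NO_PARAMS};
use std::cmp::Ordering;
use unicase::UniCase;

// case-insensitive comparison, as in the diff's unicase_compare
fn unicase_compare(s1: &str, s2: &str) -> Ordering {
    UniCase::new(s1).cmp(&UniCase::new(s2))
}

fn main() -> rusqlite::Result<()> {
    let db = Connection::open_in_memory()?;
    // must be registered on each connection before the column is used
    db.create_collation("unicase", unicase_compare)?;
    db.execute_batch(
        "create table deck_config (id integer primary key not null,
                                   name text not null collate unicase);
         insert into deck_config (id, name) values (1, 'Default'), (2, 'default');",
    )?;
    // both rows match when compared case-insensitively
    let n: i64 = db.query_row(
        "select count(*) from deck_config where name = 'DEFAULT'",
        NO_PARAMS,
        |r| r.get(0),
    )?;
    assert_eq!(n, 2);
    Ok(())
}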

=== rust: sqlite storage ===

@@ -21,7 +21,8 @@ use std::{borrow::Cow, collections::HashMap, path::Path};
 use unicase::UniCase;
 
 const SCHEMA_MIN_VERSION: u8 = 11;
-const SCHEMA_MAX_VERSION: u8 = 11;
+const SCHEMA_STARTING_VERSION: u8 = 11;
+const SCHEMA_MAX_VERSION: u8 = 12;
 
 fn unicase_compare(s1: &str, s2: &str) -> Ordering {
     UniCase::new(s1).cmp(&UniCase::new(s2))
@@ -141,7 +142,7 @@ fn schema_version(db: &Connection) -> Result<(bool, u8)> {
         .prepare("select null from sqlite_master where type = 'table' and name = 'col'")?
         .exists(NO_PARAMS)?
     {
-        return Ok((true, SCHEMA_MAX_VERSION));
+        return Ok((true, SCHEMA_STARTING_VERSION));
     }
 
     Ok((
@@ -157,36 +158,77 @@ fn trace(s: &str) {
 impl SqliteStorage {
     pub(crate) fn open_or_create(path: &Path) -> Result<Self> {
         let db = open_or_create_collection_db(path)?;
 
         let (create, ver) = schema_version(&db)?;
-        if ver > SCHEMA_MAX_VERSION {
-            return Err(AnkiError::DBError {
-                info: "".to_string(),
-                kind: DBErrorKind::FileTooNew,
-            });
-        }
-        if ver < SCHEMA_MIN_VERSION {
-            return Err(AnkiError::DBError {
-                info: "".to_string(),
-                kind: DBErrorKind::FileTooOld,
-            });
-        }
+        let upgrade = ver != SCHEMA_MAX_VERSION;
+        if create || upgrade {
+            db.execute("begin exclusive", NO_PARAMS)?;
+        }
 
         if create {
-            db.prepare_cached("begin exclusive")?.execute(NO_PARAMS)?;
             db.execute_batch(include_str!("schema11.sql"))?;
+            // start at schema 11, then upgrade below
             db.execute(
                 "update col set crt=?, ver=?",
-                params![TimestampSecs::now(), ver],
+                params![TimestampSecs::now(), SCHEMA_STARTING_VERSION],
             )?;
-            db.prepare_cached("commit")?.execute(NO_PARAMS)?;
-        };
+        } else {
+            if ver > SCHEMA_MAX_VERSION {
+                return Err(AnkiError::DBError {
+                    info: "".to_string(),
+                    kind: DBErrorKind::FileTooNew,
+                });
+            }
+            if ver < SCHEMA_MIN_VERSION {
+                return Err(AnkiError::DBError {
+                    info: "".to_string(),
+                    kind: DBErrorKind::FileTooOld,
+                });
+            }
+        }
 
         let storage = Self { db };
 
+        if create || upgrade {
+            storage.upgrade_to_latest_schema(ver)?;
+        }
+
+        if create {
+            storage.add_default_deck_config()?;
+        }
+
+        if create || upgrade {
+            storage.commit_trx()?;
+        }
+
         Ok(storage)
     }
 
+    fn upgrade_to_latest_schema(&self, ver: u8) -> Result<()> {
+        if ver < 12 {
+            self.upgrade_to_schema_12()?;
+        }
+        Ok(())
+    }
+
+    fn upgrade_to_schema_12(&self) -> Result<()> {
+        self.db
+            .execute_batch(include_str!("schema12_upgrade.sql"))?;
+        self.upgrade_deck_conf_to_schema12()
+    }
+
+    pub(crate) fn downgrade_to_schema_11(self) -> Result<()> {
+        self.begin_trx()?;
+        self.downgrade_from_schema_12()?;
+        self.commit_trx()
+    }
+
+    fn downgrade_from_schema_12(&self) -> Result<()> {
+        self.downgrade_deck_conf_from_schema12()?;
+        self.db
+            .execute_batch(include_str!("schema12_downgrade.sql"))?;
+        Ok(())
+    }
+
     // Standard transaction start/stop
     //////////////////////////////////////
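
Finally, a compact model of the version gating in open_or_create above, with the DBError variants reduced to strings. The constants match the diff; the (create, upgrade) pair mirrors what the real code computes before deciding whether to wrap the whole open in a single exclusive transaction:

const SCHEMA_MIN_VERSION: u8 = 11;
const SCHEMA_STARTING_VERSION: u8 = 11;
const SCHEMA_MAX_VERSION: u8 = 12;

// (create, upgrade): new files are created at schema 11 and upgraded in
// the same transaction; existing files upgrade only if below the max.
fn open_plan(col_table_missing: bool, ver: u8) -> Result<(bool, bool), &'static str> {
    if col_table_missing {
        return Ok((true, SCHEMA_STARTING_VERSION != SCHEMA_MAX_VERSION));
    }
    if ver > SCHEMA_MAX_VERSION {
        return Err("file too new");
    }
    if ver < SCHEMA_MIN_VERSION {
        return Err("file too old");
    }
    Ok((false, ver != SCHEMA_MAX_VERSION))
}

fn main() {
    assert_eq!(open_plan(true, 0), Ok((true, true))); // new collection
    assert_eq!(open_plan(false, 11), Ok((false, true))); // older client's file
    assert_eq!(open_plan(false, 12), Ok((false, false))); // already current
    assert_eq!(open_plan(false, 13), Err("file too new"));
}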