// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

mod burying;
mod gathering;
pub(crate) mod intersperser;
pub(crate) mod sized_chain;
mod sorting;

use std::collections::HashMap;
use std::collections::VecDeque;

use intersperser::Intersperser;
use sized_chain::SizedChain;

use super::BuryMode;
use super::CardQueues;
use super::Counts;
use super::LearningQueueEntry;
use super::MainQueueEntry;
use super::MainQueueEntryKind;
use crate::deckconfig::NewCardGatherPriority;
use crate::deckconfig::NewCardSortOrder;
use crate::deckconfig::ReviewCardOrder;
use crate::deckconfig::ReviewMix;
use crate::decks::limits::LimitTreeMap;
use crate::prelude::*;
use crate::scheduler::timing::SchedTimingToday;

/// Temporary holder for review cards that will be built into a queue.
#[derive(Debug, Clone, Copy)]
pub(crate) struct DueCard {
    pub id: CardId,
    pub note_id: NoteId,
    pub mtime: TimestampSecs,
    pub due: i32,
    pub current_deck_id: DeckId,
    pub original_deck_id: DeckId,
    pub kind: DueCardKind,
}

#[derive(Debug, Clone, Copy)]
pub(crate) enum DueCardKind {
    Review,
    Learning,
}

/// Temporary holder for new cards that will be built into a queue.
#[derive(Debug, Default, Clone, Copy)]
pub(crate) struct NewCard {
    pub id: CardId,
    pub note_id: NoteId,
    pub mtime: TimestampSecs,
    pub current_deck_id: DeckId,
    pub original_deck_id: DeckId,
    pub template_index: u32,
    pub hash: u64,
}

impl From<DueCard> for MainQueueEntry {
    fn from(c: DueCard) -> Self {
        MainQueueEntry {
            id: c.id,
            mtime: c.mtime,
            kind: match c.kind {
                DueCardKind::Review => MainQueueEntryKind::Review,
                DueCardKind::Learning => MainQueueEntryKind::InterdayLearning,
            },
        }
    }
}

impl From<NewCard> for MainQueueEntry {
    fn from(c: NewCard) -> Self {
        MainQueueEntry {
            id: c.id,
            mtime: c.mtime,
            kind: MainQueueEntryKind::New,
        }
    }
}

impl From<DueCard> for LearningQueueEntry {
    fn from(c: DueCard) -> Self {
        LearningQueueEntry {
            due: TimestampSecs(c.due as i64),
            id: c.id,
            mtime: c.mtime,
        }
    }
}

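/// Sorting and mixing options used while building a queue, taken from the
/// root deck's config (or defaults, for filtered decks).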
#[derive(Default, Clone, Debug)]
pub(super) struct QueueSortOptions {
    pub(super) new_order: NewCardSortOrder,
    pub(super) new_gather_priority: NewCardGatherPriority,
    pub(super) review_order: ReviewCardOrder,
    pub(super) day_learn_mix: ReviewMix,
    pub(super) new_review_mix: ReviewMix,
}

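/// Accumulates the cards gathered for a deck and its children, together with
/// the daily limits and context needed to assemble them into `CardQueues`.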
#[derive(Debug, Clone)]
pub(super) struct QueueBuilder {
    pub(super) new: Vec<NewCard>,
    pub(super) review: Vec<DueCard>,
    pub(super) learning: Vec<DueCard>,
    pub(super) day_learning: Vec<DueCard>,
    limits: LimitTreeMap,
    context: Context,
}

/// Data container and helper for building queues.
#[derive(Debug, Clone)]
struct Context {
    timing: SchedTimingToday,
    config_map: HashMap<DeckConfigId, DeckConfig>,
    root_deck: Deck,
    sort_options: QueueSortOptions,
    seen_note_ids: HashMap<NoteId, BuryMode>,
    deck_map: HashMap<DeckId, Deck>,
    fsrs: bool,
}

impl QueueBuilder {
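    /// Creates an empty builder for `deck_id`, loading timing info, deck
    /// configs, the sort options of the root deck, and the daily limits for
    /// the deck and its children (plus its parents, when
    /// `ApplyAllParentLimits` is enabled).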
    pub(super) fn new(col: &mut Collection, deck_id: DeckId) -> Result<Self> {
        let timing = col.timing_for_timestamp(TimestampSecs::now())?;
        let new_cards_ignore_review_limit = col.get_config_bool(BoolKey::NewCardsIgnoreReviewLimit);
        let apply_all_parent_limits = col.get_config_bool(BoolKey::ApplyAllParentLimits);
        let config_map = col.storage.get_deck_config_map()?;
        let root_deck = col.storage.get_deck(deck_id)?.or_not_found(deck_id)?;
        let mut decks = col.storage.child_decks(&root_deck)?;
        decks.insert(0, root_deck.clone());
        if apply_all_parent_limits {
            for parent in col.storage.parent_decks(&root_deck)? {
                decks.insert(0, parent);
            }
        }
        let limits = LimitTreeMap::build(
            &decks,
            &config_map,
            timing.days_elapsed,
            new_cards_ignore_review_limit,
        );
        let sort_options = sort_options(&root_deck, &config_map);
        let deck_map = col.storage.get_decks_map()?;

        Ok(QueueBuilder {
            new: Vec::new(),
            review: Vec::new(),
            learning: Vec::new(),
            day_learning: Vec::new(),
            limits,
            context: Context {
                timing,
                config_map,
                root_deck,
                sort_options,
                seen_note_ids: HashMap::new(),
                deck_map,
                fsrs: col.get_config_bool(BoolKey::Fsrs),
            },
        })
    }

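    /// Consumes the gathered cards and produces the final `CardQueues`: new
    /// cards are sorted, intraday learning cards are ordered by due time, and
    /// interday learning and new cards are merged into the main queue
    /// according to the configured review mixes.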
    pub(super) fn build(mut self, learn_ahead_secs: i64) -> CardQueues {
        self.sort_new();

        // intraday learning and total learn count
        let intraday_learning = sort_learning(self.learning);
        let now = TimestampSecs::now();
        let cutoff = now.adding_secs(learn_ahead_secs);
        let learn_count = intraday_learning
            .iter()
            .take_while(|e| e.due <= cutoff)
            .count()
            + self.day_learning.len();

        let review_count = self.review.len();
        let new_count = self.new.len();

        // merge interday and new cards into main
        let with_interday_learn = merge_day_learning(
            self.review,
            self.day_learning,
            self.context.sort_options.day_learn_mix,
        );
        let main_iter = merge_new(
            with_interday_learn,
            self.new,
            self.context.sort_options.new_review_mix,
        );

        CardQueues {
            counts: Counts {
                new: new_count,
                review: review_count,
                learning: learn_count,
            },
            main: main_iter.collect(),
            intraday_learning,
            learn_ahead_secs,
            current_day: self.context.timing.days_elapsed,
            build_time: TimestampMillis::now(),
            current_learning_cutoff: now,
        }
    }
}

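/// Reads the sort/mix options from the deck's config. Filtered decks have no
/// config id, so they fall back to the defaults with new card sorting
/// disabled, as they do not space siblings.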
fn sort_options(deck: &Deck, config_map: &HashMap<DeckConfigId, DeckConfig>) -> QueueSortOptions {
    deck.config_id()
        .and_then(|config_id| config_map.get(&config_id))
        .map(|config| QueueSortOptions {
            new_order: config.inner.new_card_sort_order(),
            new_gather_priority: config.inner.new_card_gather_priority(),
            review_order: config.inner.review_order(),
            day_learn_mix: config.inner.interday_learning_mix(),
            new_review_mix: config.inner.new_mix(),
        })
        .unwrap_or_else(|| {
            // filtered decks do not space siblings
            QueueSortOptions {
                new_order: NewCardSortOrder::NoSort,
                ..Default::default()
            }
        })
}

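/// Combines reviews and interday learning cards into a single iterator,
/// appending, prepending or interspersing the learning cards depending on
/// `mode`.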
fn merge_day_learning(
    reviews: Vec<DueCard>,
    day_learning: Vec<DueCard>,
    mode: ReviewMix,
) -> Box<dyn ExactSizeIterator<Item = MainQueueEntry>> {
    let day_learning_iter = day_learning.into_iter().map(Into::into);
    let reviews_iter = reviews.into_iter().map(Into::into);

    match mode {
        ReviewMix::AfterReviews => Box::new(SizedChain::new(reviews_iter, day_learning_iter)),
        ReviewMix::BeforeReviews => Box::new(SizedChain::new(day_learning_iter, reviews_iter)),
        ReviewMix::MixWithReviews => Box::new(Intersperser::new(reviews_iter, day_learning_iter)),
    }
}

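/// Combines new cards with the merged review/interday learning iterator,
/// appending, prepending or interspersing them depending on `mode`.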
fn merge_new(
    review_iter: impl ExactSizeIterator<Item = MainQueueEntry> + 'static,
    new: Vec<NewCard>,
    mode: ReviewMix,
) -> Box<dyn ExactSizeIterator<Item = MainQueueEntry>> {
    let new_iter = new.into_iter().map(Into::into);

    match mode {
        ReviewMix::BeforeReviews => Box::new(SizedChain::new(new_iter, review_iter)),
        ReviewMix::AfterReviews => Box::new(SizedChain::new(review_iter, new_iter)),
        ReviewMix::MixWithReviews => Box::new(Intersperser::new(review_iter, new_iter)),
    }
}

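/// Sorts intraday learning cards by due time, earliest first.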
fn sort_learning(mut learning: Vec<DueCard>) -> VecDeque<LearningQueueEntry> {
    learning.sort_unstable_by(|a, b| a.due.cmp(&b.due));
    learning.into_iter().map(LearningQueueEntry::from).collect()
}

impl Collection {
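    /// Builds the study queues for `deck_id`, updating the stored list of
    /// active decks as a side effect.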
    pub(crate) fn build_queues(&mut self, deck_id: DeckId) -> Result<CardQueues> {
        let mut queues = QueueBuilder::new(self, deck_id)?;
        self.storage
            .update_active_decks(&queues.context.root_deck)?;

        queues.gather_cards(self)?;

        let queues = queues.build(self.learn_ahead_secs() as i64);

        Ok(queues)
    }
}

#[cfg(test)]
mod test {
    use anki_proto::deck_config::deck_config::config::NewCardGatherPriority;
    use anki_proto::deck_config::deck_config::config::NewCardSortOrder;

    use super::*;
    use crate::card::CardQueue;
    use crate::card::CardType;

    impl Collection {
        fn set_deck_gather_order(&mut self, deck: &mut Deck, order: NewCardGatherPriority) {
            let mut conf = DeckConfig::default();
            conf.inner.new_card_gather_priority = order as i32;
            conf.inner.new_card_sort_order = NewCardSortOrder::NoSort as i32;
            self.add_or_update_deck_config(&mut conf).unwrap();
            deck.normal_mut().unwrap().config_id = conf.id.0;
            self.add_or_update_deck(deck).unwrap();
        }

        fn set_deck_new_limit(&mut self, deck: &mut Deck, new_limit: u32) {
            let mut conf = DeckConfig::default();
            conf.inner.new_per_day = new_limit;
            self.add_or_update_deck_config(&mut conf).unwrap();
            deck.normal_mut().unwrap().config_id = conf.id.0;
            self.add_or_update_deck(deck).unwrap();
        }

        fn set_deck_review_limit(&mut self, deck: DeckId, limit: u32) {
            let dcid = self.get_deck(deck).unwrap().unwrap().config_id().unwrap();
            let mut conf = self.get_deck_config(dcid, false).unwrap().unwrap();
            conf.inner.reviews_per_day = limit;
            self.add_or_update_deck_config(&mut conf).unwrap();
        }

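        /// Builds the queues for `deck_id` and returns each entry's
        /// (deck id, template index), in queue order.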
        fn queue_as_deck_and_template(&mut self, deck_id: DeckId) -> Vec<(DeckId, u16)> {
            self.build_queues(deck_id)
                .unwrap()
                .iter()
                .map(|entry| {
                    let card = self.storage.get_card(entry.card_id()).unwrap().unwrap();
                    (card.deck_id, card.template_idx)
                })
                .collect()
        }

        fn set_deck_review_order(&mut self, deck: &mut Deck, order: ReviewCardOrder) {
            let mut conf = DeckConfig::default();
            conf.inner.review_order = order as i32;
            self.add_or_update_deck_config(&mut conf).unwrap();
            deck.normal_mut().unwrap().config_id = conf.id.0;
            self.add_or_update_deck(deck).unwrap();
        }

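        /// Builds the queues for `deck_id` and returns each entry's
        /// (due, interval), in queue order.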
        fn queue_as_due_and_ivl(&mut self, deck_id: DeckId) -> Vec<(i32, u32)> {
            self.build_queues(deck_id)
                .unwrap()
                .iter()
                .map(|entry| {
                    let card = self.storage.get_card(entry.card_id()).unwrap().unwrap();
                    (card.due, card.interval)
                })
                .collect()
        }
    }

    #[test]
    fn should_build_empty_queue_if_limit_is_reached() {
        let mut col = Collection::new();
        CardAdder::new().due_dates(["0"]).add(&mut col);
        col.set_deck_review_limit(DeckId(1), 0);
        assert_eq!(col.queue_as_deck_and_template(DeckId(1)), vec![]);
    }

    #[test]
    fn new_queue_building() -> Result<()> {
        let mut col = Collection::new();

        // parent
        // ┣━━child━━grandchild
        // ┗━━child_2
        let mut parent = DeckAdder::new("parent").add(&mut col);
        let mut child = DeckAdder::new("parent::child").add(&mut col);
        let child_2 = DeckAdder::new("parent::child_2").add(&mut col);
        let grandchild = DeckAdder::new("parent::child::grandchild").add(&mut col);

        // add 2 new cards to each deck
        for deck in [&parent, &child, &child_2, &grandchild] {
            CardAdder::new().siblings(2).deck(deck.id).add(&mut col);
        }

        // set child's new limit to 3, which should affect grandchild
        col.set_deck_new_limit(&mut child, 3);

        // depth-first tree order
        col.set_deck_gather_order(&mut parent, NewCardGatherPriority::Deck);
        let cards = vec![
            (parent.id, 0),
            (parent.id, 1),
            (child.id, 0),
            (child.id, 1),
            (grandchild.id, 0),
            (child_2.id, 0),
            (child_2.id, 1),
        ];
        assert_eq!(col.queue_as_deck_and_template(parent.id), cards);

        // insertion order
        col.set_deck_gather_order(&mut parent, NewCardGatherPriority::LowestPosition);
        let cards = vec![
            (parent.id, 0),
            (parent.id, 1),
            (child.id, 0),
            (child.id, 1),
            (child_2.id, 0),
            (child_2.id, 1),
            (grandchild.id, 0),
        ];
        assert_eq!(col.queue_as_deck_and_template(parent.id), cards);

        // inverted insertion order, but sibling order is preserved
        col.set_deck_gather_order(&mut parent, NewCardGatherPriority::HighestPosition);
        let cards = vec![
            (grandchild.id, 0),
            (grandchild.id, 1),
            (child_2.id, 0),
            (child_2.id, 1),
            (child.id, 0),
            (parent.id, 0),
            (parent.id, 1),
        ];
        assert_eq!(col.queue_as_deck_and_template(parent.id), cards);

        Ok(())
    }

    #[test]
    fn review_queue_building() -> Result<()> {
        let mut col = Collection::new();

        let mut deck = col.get_or_create_normal_deck("Default").unwrap();
        let nt = col.get_notetype_by_name("Basic")?.unwrap();
        let mut cards = vec![];

        // relative overdueness
        let expected_queue = vec![
            (-150, 1),
            (-100, 1),
            (-50, 1),
            (-150, 5),
            (-100, 5),
            (-50, 5),
            (-150, 20),
            (-150, 20),
            (-100, 20),
            (-50, 20),
            (-150, 100),
            (-100, 100),
            (-50, 100),
            (0, 1),
            (0, 5),
            (0, 20),
            (0, 100),
        ];
        for t in expected_queue.iter() {
            let mut note = nt.new_note();
            note.set_field(0, "foo")?;
            note.id.0 = 0;
            col.add_note(&mut note, deck.id)?;
            let mut card = col.storage.get_card_by_ordinal(note.id, 0)?.unwrap();
            card.interval = t.1;
            card.due = t.0;
            card.ctype = CardType::Review;
            card.queue = CardQueue::Review;
            cards.push(card);
        }
        col.update_cards_maybe_undoable(cards, false)?;
        col.set_deck_review_order(&mut deck, ReviewCardOrder::RelativeOverdueness);
        assert_eq!(col.queue_as_due_and_ivl(deck.id), expected_queue);

        Ok(())
    }

    impl Collection {
        fn card_queue_len(&mut self) -> usize {
            self.get_queued_cards(5, false).unwrap().cards.len()
        }
    }

    #[test]
    fn new_card_potentially_burying_review_card() {
        let mut col = Collection::new();
        // add one new and one review card
        CardAdder::new().siblings(2).due_dates(["0"]).add(&mut col);
        // Potentially problematic config: New cards are shown first and would bury
        // review siblings. This poses a problem because we gather review cards first.
        col.update_default_deck_config(|config| {
            config.new_mix = ReviewMix::BeforeReviews as i32;
            config.bury_new = false;
            config.bury_reviews = true;
        });

        let old_queue_len = col.card_queue_len();
        col.answer_easy();
        col.clear_study_queues();

        // The number of cards in the queue must decrease by exactly 1, either because
        // no burying was performed, or the first built queue anticipated it and didn't
        // include the buried card.
        assert_eq!(col.card_queue_len(), old_queue_len - 1);
    }

    #[test]
    fn new_cards_may_ignore_review_limit() {
        let mut col = Collection::new();
        col.set_config_bool(BoolKey::NewCardsIgnoreReviewLimit, true, false)
            .unwrap();
        col.update_default_deck_config(|config| {
            config.reviews_per_day = 0;
        });
        CardAdder::new().add(&mut col);

        // review limit doesn't apply to new card
        assert_eq!(col.card_queue_len(), 1);
    }

    #[test]
    fn reviews_dont_affect_new_limit_before_review_limit_is_reached() {
        let mut col = Collection::new();
        col.update_default_deck_config(|config| {
            config.new_per_day = 1;
        });
        CardAdder::new().siblings(2).due_dates(["0"]).add(&mut col);
        assert_eq!(col.card_queue_len(), 2);
    }

    #[test]
    fn may_apply_parent_limits() {
        let mut col = Collection::new();
        col.set_config_bool(BoolKey::ApplyAllParentLimits, true, false)
            .unwrap();
        col.update_default_deck_config(|config| {
            config.new_per_day = 0;
        });
        let child = DeckAdder::new("Default::child")
            .with_config(|_| ())
            .add(&mut col);
        CardAdder::new().deck(child.id).add(&mut col);
        col.set_current_deck(child.id).unwrap();
        assert_eq!(col.card_queue_len(), 0);
    }
}