https://github.com/ankitects/anki.git

Rework v3 queue building to respect parent limits

commit 681ca60b70, parent 94359a5d2f
7 changed files with 454 additions and 136 deletions
rslib/src/decks/limits.rs

@@ -11,6 +11,7 @@ use crate::{

#[derive(Clone, Copy, Debug, PartialEq)]
pub(crate) struct RemainingLimits {
    pub deck_id: DeckId,
    pub review: u32,
    pub new: u32,
}

@@ -25,6 +26,7 @@ impl RemainingLimits {
            rev_today += new_today;
        }
        RemainingLimits {
            deck_id: deck.id,
            review: ((config.inner.reviews_per_day as i32) - rev_today).max(0) as u32,
            new: ((config.inner.new_per_day as i32) - new_today).max(0) as u32,
        }

@@ -41,6 +43,7 @@ impl RemainingLimits {

impl Default for RemainingLimits {
    fn default() -> Self {
        RemainingLimits {
            deck_id: DeckId(1),
            review: 9999,
            new: 9999,
        }
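The new deck_id field above lets a limit be traced back to its deck. Child limits are clamped to their parents via cap_to(), whose body lies outside this diff; a minimal sketch of such an element-wise cap, assuming it simply takes the minimum of each remaining count:

impl RemainingLimits {
    /// Sketch only: clamp this deck's remaining counts to its parent's.
    /// The real method body is not part of this commit's hunks.
    pub(crate) fn cap_to(&mut self, parent: RemainingLimits) {
        self.review = self.review.min(parent.review);
        self.new = self.new.min(parent.new);
    }
}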
rslib/src/scheduler/queue/builder/context.rs (new file, 261 lines)
@@ -0,0 +1,261 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use std::collections::HashMap;

use id_tree::{InsertBehavior, Node, NodeId, Tree, TreeBuilder};

use super::{BuryMode, QueueSortOptions};
use crate::{
    deckconfig::NewCardSortOrder, decks::limits::RemainingLimits, prelude::*,
    scheduler::timing::SchedTimingToday,
};

/// Data container and helper for building queues.
#[derive(Debug, Clone)]
pub(super) struct Context {
    pub(super) timing: SchedTimingToday,
    config_map: HashMap<DeckConfigId, DeckConfig>,
    /// The active decks.
    pub(super) decks: Vec<Deck>,
    pub(super) limits: LimitTreeMap,
    pub(super) sort_options: QueueSortOptions,
    deck_map: HashMap<DeckId, Deck>,
}

#[derive(Debug, Clone)]
pub(super) struct LimitTreeMap {
    /// A tree representing the remaining limits of the active deck hierarchy.
    //
    // As long as we never (1) allow a tree without a root and (2) remove nodes,
    // it's safe to unwrap on Tree::get() and Tree::root_node_id(), even if we
    // clone Nodes.
    tree: Tree<RemainingLimits>,
    /// A map to access the tree node of a deck. Only decks with a remaining
    /// limit above zero are included.
    map: HashMap<DeckId, NodeId>,
    initial_root_limits: RemainingLimits,
}

impl LimitTreeMap {
    /// Returns the newly built [LimitTreeMap] and the represented decks in depth-first order.
    fn build(
        col: &mut Collection,
        deck_id: DeckId,
        config: &HashMap<DeckConfigId, DeckConfig>,
        today: u32,
    ) -> Result<(Self, Vec<Deck>)> {
        let mut decks = vec![col.storage.get_deck(deck_id)?.ok_or(AnkiError::NotFound)?];

        let root_config = decks[0].config_id().and_then(|id| config.get(&id));
        let initial_root_limits = RemainingLimits::new(&decks[0], root_config, today, true);
        let tree = TreeBuilder::new()
            .with_root(Node::new(initial_root_limits))
            .build();

        let parent_node_id = tree.root_node_id().unwrap().clone();
        let mut map = HashMap::new();
        map.insert(deck_id, parent_node_id.clone());

        let mut limits = Self {
            tree,
            map,
            initial_root_limits,
        };
        decks = limits.add_descendant_nodes(
            col,
            &parent_node_id,
            initial_root_limits,
            decks,
            config,
            today,
        )?;

        Ok((limits, decks))
    }

    /// Recursively appends all descendants to the provided [NodeMut], adding their
    /// [NodeId]s to the [HashMap] and appending their [Deck]s to the [Vec<Deck>],
    /// which is returned.
    ///
    /// The [NodeMut] is assumed to represent the last [Deck] in the [Vec<Deck>].
    /// [RemainingLimits] are capped to their parent's limits.
    /// [Deck]s with empty review limits are _not_ added to the [HashMap].
    fn add_descendant_nodes(
        &mut self,
        col: &mut Collection,
        parent_node_id: &NodeId,
        parent_limits: RemainingLimits,
        mut decks: Vec<Deck>,
        config: &HashMap<DeckConfigId, DeckConfig>,
        today: u32,
    ) -> Result<Vec<Deck>> {
        for child_deck in col.storage.immediate_child_decks(&decks[decks.len() - 1])? {
            let mut child_limits = RemainingLimits::new(
                &child_deck,
                child_deck.config_id().and_then(|id| config.get(&id)),
                today,
                true,
            );
            child_limits.cap_to(parent_limits);

            let child_node_id = self
                .tree
                .insert(
                    Node::new(child_limits),
                    InsertBehavior::UnderNode(&parent_node_id),
                )
                .unwrap();
            if child_limits.review > 0 {
                self.map.insert(child_deck.id, child_node_id.clone());
            }

            decks.push(child_deck);
            decks =
                self.add_descendant_nodes(col, &child_node_id, child_limits, decks, config, today)?;
        }

        Ok(decks)
    }
}

impl Context {
    pub(super) fn new(col: &mut Collection, deck_id: DeckId) -> Result<Self> {
        let timing = col.timing_for_timestamp(TimestampSecs::now())?;
        let config_map = col.storage.get_deck_config_map()?;
        let (limits, decks) = LimitTreeMap::build(col, deck_id, &config_map, timing.days_elapsed)?;
        let sort_options = sort_options(&decks[0], &config_map);
        let deck_map = col.storage.get_decks_map()?;

        Ok(Self {
            timing,
            config_map,
            decks,
            limits,
            sort_options,
            deck_map,
        })
    }

    pub(super) fn root_deck(&self) -> &Deck {
        &self.decks[0]
    }

    pub(super) fn active_deck_ids(&self) -> Vec<DeckId> {
        self.decks.iter().map(|deck| deck.id).collect()
    }

    pub(super) fn bury_mode(&self, deck_id: DeckId) -> BuryMode {
        self.deck_map
            .get(&deck_id)
            .and_then(|deck| deck.config_id())
            .and_then(|config_id| self.config_map.get(&config_id))
            .map(|config| BuryMode {
                bury_new: config.inner.bury_new,
                bury_reviews: config.inner.bury_reviews,
            })
            .unwrap_or_default()
    }
}

impl LimitTreeMap {
    pub(super) fn is_exhausted_root(&self) -> bool {
        self.map.is_empty()
    }

    pub(super) fn is_exhausted(&self, deck_id: DeckId) -> bool {
        self.map.get(&deck_id).is_none()
    }

    pub(super) fn remaining_node_id(&self, deck_id: DeckId) -> Option<NodeId> {
        self.map.get(&deck_id).map(Clone::clone)
    }

    pub(super) fn decrement_node_and_parent_review(&mut self, node_id: &NodeId) {
        let node = self.tree.get_mut(node_id).unwrap();
        let parent = node.parent().map(Clone::clone);

        let mut limit = node.data_mut();
        limit.review -= 1;
        if limit.review < 1 {
            self.remove_node_and_descendants_from_map(node_id);
        }

        if let Some(parent_id) = parent {
            self.decrement_node_and_parent_review(&parent_id)
        }
    }

    pub(super) fn decrement_node_and_parent_new(&mut self, node_id: &NodeId) {
        let node = self.tree.get_mut(node_id).unwrap();
        let parent = node.parent().map(Clone::clone);

        let mut limit = node.data_mut();
        limit.new -= 1;
        if limit.new < 1 {
            self.remove_node_and_descendants_from_map(node_id);
        }

        if let Some(parent_id) = parent {
            self.decrement_node_and_parent_new(&parent_id)
        }
    }

    pub(super) fn remove_node_and_descendants_from_map(&mut self, node_id: &NodeId) {
        let node = self.tree.get(node_id).unwrap();
        self.map.remove(&node.data().deck_id);

        for child_id in node.children().clone() {
            self.remove_node_and_descendants_from_map(&child_id);
        }
    }

    pub(super) fn cap_new_to_review(&mut self) {
        self.cap_new_to_review_rec(&self.tree.root_node_id().unwrap().clone(), 9999);
    }

    fn cap_new_to_review_rec(&mut self, node_id: &NodeId, parent_limit: u32) {
        let node = self.tree.get_mut(node_id).unwrap();
        let limit = node.data_mut();
        limit.new = limit.new.min(limit.review).min(parent_limit);
        let node_limit = limit.new;

        for child_id in node.children().clone() {
            self.cap_new_to_review_rec(&child_id, node_limit);
        }
    }

    /// The configured review and new limits of the root deck, but with the new
    /// limit capped to the remaining reviews.
    pub(super) fn final_limits(&self) -> RemainingLimits {
        RemainingLimits {
            new: self.initial_root_limits.new.min(
                self.tree
                    .get(self.tree.root_node_id().unwrap())
                    .unwrap()
                    .data()
                    .review,
            ),
            ..self.initial_root_limits
        }
    }
}

fn sort_options(deck: &Deck, config_map: &HashMap<DeckConfigId, DeckConfig>) -> QueueSortOptions {
    deck.config_id()
        .and_then(|config_id| config_map.get(&config_id))
        .map(|config| QueueSortOptions {
            new_order: config.inner.new_card_sort_order(),
            new_gather_priority: config.inner.new_card_gather_priority(),
            review_order: config.inner.review_order(),
            day_learn_mix: config.inner.interday_learning_mix(),
            new_review_mix: config.inner.new_mix(),
        })
        .unwrap_or_else(|| {
            // filtered decks do not space siblings
            QueueSortOptions {
                new_order: NewCardSortOrder::LowestPosition,
                ..Default::default()
            }
        })
}
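The new LimitTreeMap pairs an id_tree::Tree<RemainingLimits> that mirrors the active deck hierarchy with a HashMap<DeckId, NodeId> pointing at the nodes that still have capacity. Below is a self-contained sketch of the same pattern with toy values (not Anki code), assuming the id_tree 1.x API used in the file above, showing how decrementing a child also walks up and decrements every ancestor:

use id_tree::{InsertBehavior, Node, NodeId, Tree, TreeBuilder};

fn decrement_chain(tree: &mut Tree<u32>, node_id: &NodeId) {
    let node = tree.get_mut(node_id).unwrap();
    let parent = node.parent().cloned();
    *node.data_mut() -= 1; // use up one unit of this node's remaining limit
    if let Some(parent_id) = parent {
        decrement_chain(tree, &parent_id); // and one unit of each ancestor's
    }
}

fn main() {
    // Root deck with 2 remaining reviews, child deck with 5.
    let mut tree: Tree<u32> = TreeBuilder::new().with_root(Node::new(2)).build();
    let root_id = tree.root_node_id().unwrap().clone();
    let child_id = tree
        .insert(Node::new(5), InsertBehavior::UnderNode(&root_id))
        .unwrap();

    // Answering one review in the child consumes capacity in both decks.
    decrement_chain(&mut tree, &child_id);
    assert_eq!(*tree.get(&child_id).unwrap().data(), 4);
    assert_eq!(*tree.get(&root_id).unwrap().data(), 1);
}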
rslib/src/scheduler/queue/builder/mod.rs

@@ -1,6 +1,7 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

mod context;
mod gathering;
pub(crate) mod intersperser;
pub(crate) mod sized_chain;

@@ -11,15 +12,16 @@ use std::collections::{HashMap, VecDeque};
use intersperser::Intersperser;
use sized_chain::SizedChain;

use self::context::Context;
use super::{CardQueues, Counts, LearningQueueEntry, MainQueueEntry, MainQueueEntryKind};
use crate::{
    deckconfig::{NewCardGatherPriority, NewCardSortOrder, ReviewCardOrder, ReviewMix},
    decks::limits::{remaining_limits_map, RemainingLimits},
    decks::limits::RemainingLimits,
    prelude::*,
};

/// Temporary holder for review cards that will be built into a queue.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Copy)]
pub(crate) struct DueCard {
    pub id: CardId,
    pub note_id: NoteId,

@@ -43,6 +45,7 @@ pub(crate) struct NewCard {
    pub note_id: NoteId,
    pub mtime: TimestampSecs,
    pub due: i32,
    pub current_deck_id: DeckId,
    pub original_deck_id: DeckId,
    pub template_index: u32,
    pub hash: u64,
@@ -204,134 +207,126 @@ fn sort_learning(mut learning: Vec<DueCard>) -> VecDeque<LearningQueueEntry> {

impl Collection {
    pub(crate) fn build_queues(&mut self, deck_id: DeckId) -> Result<CardQueues> {
        let now = TimestampSecs::now();
        let timing = self.timing_for_timestamp(now)?;
        let decks = self.storage.deck_with_children(deck_id)?;
        // need full map, since filtered decks may contain cards from decks
        // outside tree
        let deck_map = self.storage.get_decks_map()?;
        let config = self.storage.get_deck_config_map()?;
        let sort_options = decks[0]
            .config_id()
            .and_then(|config_id| config.get(&config_id))
            .map(|config| QueueSortOptions {
                new_order: config.inner.new_card_sort_order(),
                new_gather_priority: config.inner.new_card_gather_priority(),
                review_order: config.inner.review_order(),
                day_learn_mix: config.inner.interday_learning_mix(),
                new_review_mix: config.inner.new_mix(),
            })
            .unwrap_or_else(|| {
                // filtered decks do not space siblings
                QueueSortOptions {
                    new_order: NewCardSortOrder::LowestPosition,
                    ..Default::default()
                }
            });
        let mut ctx = Context::new(self, deck_id)?;
        self.storage.update_active_decks(ctx.root_deck())?;
        let mut queues = QueueBuilder::new(ctx.sort_options.clone());

        // fetch remaining limits, and cap to selected deck limits so that we don't
        // do more work than necessary
        let mut remaining = remaining_limits_map(decks.iter(), &config, timing.days_elapsed, true);
        let selected_deck_limits_at_start = *remaining.get(&deck_id).unwrap();
        let mut selected_deck_limits = selected_deck_limits_at_start;
        for limit in remaining.values_mut() {
            limit.cap_to(selected_deck_limits);
        }
        self.add_intraday_learning_cards(&mut queues, &mut ctx)?;

        self.storage.update_active_decks(&decks[0])?;
        let mut queues = QueueBuilder::new(sort_options.clone());
        self.add_due_cards(&mut queues, &mut ctx, DueCardKind::Learning)?;
        self.add_due_cards(&mut queues, &mut ctx, DueCardKind::Review)?;

        let get_bury_mode = |home_deck: DeckId| {
            deck_map
                .get(&home_deck)
                .and_then(|deck| deck.config_id())
                .and_then(|config_id| config.get(&config_id))
                .map(|config| BuryMode {
                    bury_new: config.inner.bury_new,
                    bury_reviews: config.inner.bury_reviews,
                })
                .unwrap_or_default()
        ctx.limits.cap_new_to_review();
        match ctx.sort_options.new_gather_priority {
            NewCardGatherPriority::Deck => self.add_new_cards_by_deck(&mut queues, &mut ctx)?,
            NewCardGatherPriority::LowestPosition => {
                self.add_new_cards_by_position(&mut queues, &mut ctx, false)?
            }
            NewCardGatherPriority::HighestPosition => {
                self.add_new_cards_by_position(&mut queues, &mut ctx, true)?
            }
        };

        // intraday cards first, noting down any notes that will need burying
        self.storage
            .for_each_intraday_card_in_active_decks(timing.next_day_at, |card| {
                let bury = get_bury_mode(card.current_deck_id);
                queues.add_intraday_learning_card(card, bury)
            })?;

        // interday learning, then reviews
        let mut add_due_cards = |kind: DueCardKind| -> Result<()> {
            if selected_deck_limits.review != 0 {
                self.storage.for_each_due_card_in_active_decks(
                    timing.days_elapsed,
                    sort_options.review_order,
                    kind,
                    |card| {
                        if selected_deck_limits.review == 0 {
                            return false;
                        }
                        let bury = get_bury_mode(card.original_deck_id.or(card.current_deck_id));
                        let limits = remaining.get_mut(&card.current_deck_id).unwrap();
                        if limits.review != 0 && queues.add_due_card(card, bury) {
                            selected_deck_limits.review -= 1;
                            limits.review -= 1;
                        }

                        true
                    },
                )?;
            }
            Ok(())
        };
        add_due_cards(DueCardKind::Learning)?;
        add_due_cards(DueCardKind::Review)?;

        // cap new cards to the remaining review limit
        for limit in remaining.values_mut() {
            limit.new = limit.new.min(limit.review).min(selected_deck_limits.review);
        }
        selected_deck_limits.new = selected_deck_limits.new.min(selected_deck_limits.review);

        // new cards last
        let can_exit_early = sort_options.new_gather_priority == NewCardGatherPriority::Deck;
        let reverse = sort_options.new_gather_priority == NewCardGatherPriority::HighestPosition;
        for deck in &decks {
            if can_exit_early && selected_deck_limits.new == 0 {
                break;
            }
            let limit = remaining.get_mut(&deck.id).unwrap();
            if limit.new > 0 {
                self.storage
                    .for_each_new_card_in_deck(deck.id, reverse, |card| {
                        let bury = get_bury_mode(card.original_deck_id.or(deck.id));
                        if limit.new != 0 {
                            if queues.add_new_card(card, bury) {
                                limit.new -= 1;
                                selected_deck_limits.new =
                                    selected_deck_limits.new.saturating_sub(1);
                            }

                            true
                        } else {
                            false
                        }
                    })?;
            }
        }

        let final_limits = RemainingLimits {
            new: selected_deck_limits_at_start
                .new
                .min(selected_deck_limits.review),
            ..selected_deck_limits_at_start
        };
        let queues = queues.build(
            final_limits,
            ctx.limits.final_limits(),
            self.learn_ahead_secs() as i64,
            timing.days_elapsed,
            ctx.timing.days_elapsed,
        );

        Ok(queues)
    }

    fn add_intraday_learning_cards(
        &self,
        queues: &mut QueueBuilder,
        ctx: &mut Context,
    ) -> Result<()> {
        self.storage
            .for_each_intraday_card_in_active_decks(ctx.timing.next_day_at, |card| {
                let bury = ctx.bury_mode(card.current_deck_id);
                queues.add_intraday_learning_card(card, bury)
            })?;

        Ok(())
    }

    fn add_due_cards(
        &self,
        queues: &mut QueueBuilder,
        ctx: &mut Context,
        kind: DueCardKind,
    ) -> Result<()> {
        if !ctx.limits.is_exhausted_root() {
            self.storage.for_each_due_card_in_active_decks(
                ctx.timing.days_elapsed,
                ctx.sort_options.review_order,
                kind,
                |card| {
                    if ctx.limits.is_exhausted_root() {
                        return false;
                    }
                    let bury = ctx.bury_mode(card.original_deck_id.or(card.current_deck_id));
                    if let Some(node_id) = ctx.limits.remaining_node_id(card.current_deck_id) {
                        if queues.add_due_card(card, bury) {
                            ctx.limits.decrement_node_and_parent_review(&node_id);
                        }
                    }

                    true
                },
            )?;
        }
        Ok(())
    }

    fn add_new_cards_by_deck(&self, queues: &mut QueueBuilder, ctx: &mut Context) -> Result<()> {
        // TODO: must own Vec as closure below requires unique access to ctx
        // maybe decks should not be field of Context?
        for deck_id in ctx.active_deck_ids() {
            if ctx.limits.is_exhausted_root() {
                break;
            }
            if !ctx.limits.is_exhausted(deck_id) {
                self.storage.for_each_new_card_in_deck(deck_id, |card| {
                    let bury = ctx.bury_mode(card.original_deck_id.or(deck_id));
                    // TODO: This could be done more efficiently if we held on to the node_id
                    // and only adjusted the parent nodes after this node's limit is reached
                    if let Some(node_id) = ctx.limits.remaining_node_id(deck_id) {
                        if queues.add_new_card(card, bury) {
                            ctx.limits.decrement_node_and_parent_new(&node_id);
                        }

                        true
                    } else {
                        false
                    }
                })?;
            }
        }

        Ok(())
    }

    fn add_new_cards_by_position(
        &self,
        queues: &mut QueueBuilder,
        ctx: &mut Context,
        reverse: bool,
    ) -> Result<()> {
        self.storage
            .for_each_new_card_in_active_decks(reverse, |card| {
                let bury = ctx.bury_mode(card.original_deck_id.or(card.current_deck_id));
                if let Some(node_id) = ctx.limits.remaining_node_id(card.current_deck_id) {
                    if queues.add_new_card(card, bury) {
                        ctx.limits.decrement_node_and_parent_new(&node_id);
                    }

                    true
                } else {
                    false
                }
            })?;

        Ok(())
    }
}
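With the Context in place, build_queues() now gathers intraday learning cards, then interday learning and reviews, and only then calls cap_new_to_review() before gathering new cards, so new cards can never outnumber the reviews still allowed anywhere on the path to the root. A tiny sketch of that per-node rule with made-up numbers (mirroring cap_new_to_review_rec above):

/// Hypothetical illustration: a node's new limit is clamped to its own
/// remaining reviews and to its parent's already-capped new limit.
fn capped_new(own_new: u32, own_review: u32, parent_capped_new: u32) -> u32 {
    own_new.min(own_review).min(parent_capped_new)
}

fn main() {
    let root = capped_new(20, 3, 9999); // root: 3 reviews left => at most 3 new
    let child = capped_new(6, 10, root); // child inherits the root's cap of 3
    assert_eq!((root, child), (3, 3));
}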
rslib/src/storage/card/active_new_cards.sql (new file, 13 lines)

@@ -0,0 +1,13 @@
SELECT id,
  nid,
  due,
  ord,
  cast(mod AS integer),
  did,
  odid
FROM cards
WHERE did IN (
    SELECT id
    FROM active_decks
  )
  AND queue = 0
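The query keeps only rows with queue = 0, i.e. cards sitting in the new queue. For orientation, a sketch of the queue values stored in that column, as I understand Anki's card schema (the real constants live in rslib's CardQueue enum, not in this diff):

#[allow(dead_code)]
#[repr(i8)]
enum CardQueue {
    New = 0,        // matched by `queue = 0` above
    Learn = 1,      // intraday learning
    Review = 2,
    DayLearn = 3,   // interday learning
    Suspended = -1,
    SchedBuried = -2,
    UserBuried = -3,
}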
rslib/src/storage/card/mod.rs

@@ -69,6 +69,19 @@ fn row_to_card(row: &Row) -> result::Result<Card, rusqlite::Error> {
    })
}

fn row_to_new_card(row: &Row) -> result::Result<NewCard, rusqlite::Error> {
    Ok(NewCard {
        id: row.get(0)?,
        note_id: row.get(1)?,
        due: row.get(2)?,
        template_index: row.get(3)?,
        mtime: row.get(4)?,
        current_deck_id: row.get(5)?,
        original_deck_id: row.get(6)?,
        hash: 0,
    })
}

impl super::SqliteStorage {
    pub fn get_card(&self, cid: CardId) -> Result<Option<Card>> {
        self.db

@@ -229,11 +242,30 @@ impl super::SqliteStorage {
        Ok(())
    }

    /// Call func() for each new card, stopping when it returns false
    /// or no more cards found.
    pub(crate) fn for_each_new_card_in_deck<F>(
    /// Call func() for each new card in the provided deck, stopping when it
    /// returns false or no more cards are found.
    pub(crate) fn for_each_new_card_in_deck<F>(&self, deck: DeckId, mut func: F) -> Result<()>
    where
        F: FnMut(NewCard) -> bool,
    {
        let mut stmt = self.db.prepare_cached(&format!(
            "{} ORDER BY due ASC",
            include_str!("new_cards.sql")
        ))?;
        let mut rows = stmt.query(params![deck])?;
        while let Some(row) = rows.next()? {
            if !func(row_to_new_card(row)?) {
                break;
            }
        }

        Ok(())
    }

    /// Call func() for each new card in the active decks, stopping when it
    /// returns false or no more cards found.
    pub(crate) fn for_each_new_card_in_active_decks<F>(
        &self,
        deck: DeckId,
        reverse: bool,
        mut func: F,
    ) -> Result<()>

@@ -242,20 +274,12 @@ impl super::SqliteStorage {
    {
        let mut stmt = self.db.prepare_cached(&format!(
            "{} ORDER BY {}",
            include_str!("new_cards.sql"),
            include_str!("active_new_cards.sql"),
            if reverse { "due desc" } else { "due asc" }
        ))?;
        let mut rows = stmt.query(params![deck])?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            if !func(NewCard {
                id: row.get(0)?,
                note_id: row.get(1)?,
                due: row.get(2)?,
                template_index: row.get(3)?,
                mtime: row.get(4)?,
                original_deck_id: row.get(5)?,
                hash: 0,
            }) {
            if !func(row_to_new_card(row)?) {
                break;
            }
        }
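These storage helpers stream rows through a callback and stop as soon as it returns false, which is how the queue builder bails out once a limit is exhausted. A hypothetical usage sketch (storage, deck_id and limit are placeholders, not part of this diff):

// Collect at most `limit` new cards from one deck; returning false from the
// closure makes for_each_new_card_in_deck() stop iterating early.
let mut gathered: Vec<NewCard> = Vec::new();
storage.for_each_new_card_in_deck(deck_id, |card| {
    gathered.push(card);
    gathered.len() < limit
})?;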
rslib/src/storage/deck/mod.rs

@@ -17,6 +17,7 @@ use crate::{
    decks::{immediate_parent_name, DeckCommon, DeckKindContainer, DeckSchema11, DueCounts},
    error::DbErrorKind,
    prelude::*,
    text::escape_sql_wildcards,
};

fn row_to_deck(row: &Row) -> Result<Deck> {

@@ -210,6 +211,19 @@ impl SqliteStorage {
            .collect()
    }

    pub(crate) fn immediate_child_decks(&self, parent: &Deck) -> Result<Vec<Deck>> {
        let prefix_start = format!("{}\x1f", parent.name);
        let prefix_end = format!("{}\x20", parent.name);
        let child_descendant = format!("{}%\x1f%", escape_sql_wildcards(&prefix_start));
        self.db
            .prepare_cached(concat!(
                include_str!("get_deck.sql"),
                " where name >= ? and name < ? and not name like ? escape '\\'"
            ))?
            .query_and_then([prefix_start, prefix_end, child_descendant], row_to_deck)?
            .collect()
    }

    pub(crate) fn deck_id_with_children(&self, parent: &Deck) -> Result<Vec<DeckId>> {
        let prefix_start = format!("{}\x1f", parent.name);
        let prefix_end = format!("{}\x20", parent.name);
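immediate_child_decks() leans on the \x1f separator Anki uses inside deck names: the half-open range [prefix_start, prefix_end) selects every descendant, and the NOT LIKE pattern (built from the parent's name with SQL wildcards escaped) removes anything nested more than one level deep. A small worked sketch of the bounds, with a hypothetical parent named "Parent":

fn main() {
    let prefix_start = format!("{}\x1f", "Parent"); // "Parent\x1f"
    let prefix_end = format!("{}\x20", "Parent");   // "Parent " (0x20 sorts right after 0x1f)

    // name >= prefix_start AND name < prefix_end matches all descendants:
    assert!("Parent\x1fChild" >= prefix_start.as_str());
    assert!("Parent\x1fChild" < prefix_end.as_str());
    assert!("Parent\x1fChild\x1fGrandchild" < prefix_end.as_str());

    // The additional NOT LIKE 'Parent\x1f%\x1f%' clause then excludes the
    // grandchild, whose name contains a second separator, leaving only the
    // immediate children.
}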
rslib/src/text.rs

@@ -355,6 +355,14 @@ pub(crate) fn escape_anki_wildcards_for_search_node(txt: &str) -> String {
    }
}

/// Escape the SQL wildcard characters `%` and `_`.
pub(crate) fn escape_sql_wildcards(txt: &str) -> Cow<str> {
    lazy_static! {
        static ref RE: Regex = Regex::new(r"(_|%)").unwrap();
    }
    RE.replace_all(txt, "\\$1")
}

/// Return a function to match input against `search`,
/// which may contain wildcards.
pub(crate) fn glob_matcher(search: &str) -> impl Fn(&str) -> bool + '_ {
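escape_sql_wildcards() exists so that a deck whose name itself contains % or _ cannot act as a wildcard once spliced into the LIKE pattern above (which declares ESCAPE '\'). A hypothetical check of the behaviour:

let escaped = escape_sql_wildcards("100%_done");
assert_eq!(escaped.as_ref(), "100\\%\\_done"); // i.e. 100\%\_done, both wildcards escaped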