Mirror of https://github.com/ankitects/anki.git, synced 2025-09-21 07:22:23 -04:00
add new 'groups' concept, refactor deletions
Users who want to study small subsections at one time (e.g. "lesson 14") are currently best served by creating lots of little decks. This is because:

- selective study is a bit cumbersome to switch between
- the graphs and statistics are for the entire deck
- selective study can be slow on mobile devices
- when the list of cards to hide/show is big, or when there are many due cards, performance can suffer
- scheduling can only be configured per deck

Groups are intended to address the above problems. All cards start off in the same group, but their group can be changed. Unlike tags, a card can only be a member of a single group at a time. This lets us divide the deck into non-overlapping sets of cards, which will make things like showing due counts for a single category considerably cheaper (a short sketch of this follows the changed-file summary below). The user interface might want to show something like a deck browser for decks that have more than one group, showing due counts and allowing people to study each group individually, or to study all at once.

Instead of storing the scheduling config in the deck or the model, the scheduling moves into a separate config table linked to the groups table. That way a user can have multiple groups that all share the same scheduling information if they want. And deletion tracking is now in a single table.
Parent: abd665e48f
Commit: bb79b0e17c
10 changed files with 174 additions and 134 deletions
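To make the commit message concrete, here is a minimal sketch (not part of the patch) of why one group per card makes per-category counts cheap: a due count per group becomes a single grouped aggregate over the new cards.groupId column, rather than a tag join. The dueCountsByGroup helper name and the db wrapper are assumptions for illustration; the meaning of queue value 1 ("due") comes from the comments in the card table diff below.

def dueCountsByGroup(db, now):
    # one row per group: (groupId, number of cards currently due)
    return db.all("""
select groupId, count(*) from cards
where queue = 1 and due < :now
group by groupId""", now=now)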
@@ -17,6 +17,7 @@ MAX_TIMER = 60
 # Type: 0=learning, 1=due, 2=new
 # Queue: 0=learning, 1=due, 2=new
 # -1=suspended, -2=user buried, -3=sched buried
+# Group: scheduling group
 # Ordinal: card template # for fact
 # Flags: unused; reserved for future use

@@ -24,11 +25,10 @@ cardsTable = Table(
     'cards', metadata,
     Column('id', Integer, primary_key=True),
     Column('factId', Integer, ForeignKey("facts.id"), nullable=False),
-    Column('modelId', Integer, ForeignKey("models.id"), nullable=False),
+    Column('groupId', Integer, nullable=False, default=1),
     Column('cardModelId', Integer, ForeignKey("cardModels.id"), nullable=False),
-    # general
-    Column('created', Float, nullable=False, default=time.time),
     Column('modified', Float, nullable=False, default=time.time),
+    # general
     Column('question', UnicodeText, nullable=False, default=u""),
     Column('answer', UnicodeText, nullable=False, default=u""),
     Column('ordinal', Integer, nullable=False),
@@ -50,15 +50,14 @@ cardsTable = Table(

 class Card(object):

-    def __init__(self, fact=None, cardModel=None, created=None):
+    # FIXME: this needs tidying up
+    def __init__(self, fact=None, cardModel=None, due=None):
         self.id = genID()
         self.modified = time.time()
-        if created:
-            self.created = created
-            self.due = created
+        if due:
+            self.due = due
         else:
             self.due = self.modified
-        self.position = self.due
         if fact:
             self.fact = fact
             self.modelId = fact.modelId
@@ -145,9 +144,8 @@ class Card(object):
             return
         (self.id,
          self.factId,
-         self.modelId,
+         self.groupId,
          self.cardModelId,
-         self.created,
          self.modified,
          self.question,
          self.answer,
@@ -166,15 +164,11 @@ class Card(object):
         return True

     def toDB(self, s):
+        # this shouldn't be used for schema changes
         s.execute("""update cards set
-factId=:factId,
-modelId=:modelId,
-cardModelId=:cardModelId,
-created=:created,
 modified=:modified,
 question=:question,
 answer=:answer,
-ordinal=:ordinal,
 flags=:flags,
 type=:type,
 queue=:queue,
@@ -198,12 +192,3 @@ mapper(Fact, factsTable, properties={
     'model': relation(Model),
     'fields': relation(Field, backref="fact", order_by=Field.ordinal),
 })
-
-# Card deletions
-##########################################################################
-
-cardsDeletedTable = Table(
-    'cardsDeleted', metadata,
-    Column('cardId', Integer, ForeignKey("cards.id"),
-           nullable=False),
-    Column('deletedTime', Float, nullable=False))
anki/deck.py (66 lines changed)

@@ -14,7 +14,7 @@ from anki.utils import parseTags, tidyHTML, genID, ids2str, hexifyID, \
 from anki.revlog import logReview
 from anki.models import Model, CardModel, formatQA
 from anki.fonts import toPlatformFont
-from anki.tags import initTagTables, tagIds, tagId
+from anki.tags import tagIds, tagId
 from operator import itemgetter
 from itertools import groupby
 from anki.hooks import runHook, hookEmpty
@@ -27,7 +27,7 @@ from anki.consts import *
 import anki.latex # sets up hook

 # ensure all the DB metadata in other files is loaded before proceeding
-import anki.models, anki.facts, anki.cards, anki.media
+import anki.models, anki.facts, anki.cards, anki.media, anki.groups, anki.graves

 # Selective study and new card limits. These vars are necessary to determine
 # counts even on a minimum deck load, and thus are separate from the rest of
@@ -52,8 +52,6 @@ defaultConf = {
     'collapseTime': 600,
     'sessionRepLimit': 0,
     'sessionTimeLimit': 600,
-    'suspendLeeches': True,
-    'leechFails': 16,
     'currentModelId': None,
     'mediaURL': "",
     'latexPre': """\
@@ -550,8 +548,7 @@ where factId = :fid and cardModelId = :cmid""",
         strids = ids2str(ids)
         self.db.statement("delete from facts where id in %s" % strids)
         self.db.statement("delete from fields where factId in %s" % strids)
-        data = [{'id': id, 'time': now} for id in ids]
-        self.db.statements("insert into factsDeleted values (:id, :time)", data)
+        anki.graves.registerMany(self.db, anki.graves.FACT, ids)
         self.setModified()

     def deleteDanglingFacts(self):
@@ -611,8 +608,7 @@ where facts.id not in (select distinct factId from cards)""")
         # drop from cards
         self.db.statement("delete from cards where id in %s" % strids)
         # note deleted
-        data = [{'id': id, 'time': now} for id in ids]
-        self.db.statements("insert into cardsDeleted values (:id, :time)", data)
+        anki.graves.registerMany(self.db, anki.graves.CARD, ids)
         # gather affected tags
         tags = self.db.column0(
             "select tagId from cardTags where cardId in %s" %
@@ -670,8 +666,7 @@ facts.id = cards.factId""", id=model.id))
         self.db.flush()
         if self.currentModel == model:
             self.currentModel = self.models[0]
-        self.db.statement("insert into modelsDeleted values (:id, :time)",
-                          id=model.id, time=time.time())
+        anki.graves.registerOne(self.db, anki.graves.MODEL, model.id)
         self.flushMod()
         self.refreshSession()
         self.setModified()
@@ -2204,10 +2199,7 @@ Return new path, relative to media dir."""

     def setSchemaModified(self):
         self.schemaMod = time.time()
-        # since we guarantee a full sync to all clients, this is a good time
-        # to forget old gravestones
-        for k in ("cards", "facts", "models", "media"):
-            self.db.statement("delete from %sDeleted" % k)
+        anki.graves.forgetAll(self.db)

     def flushMod(self):
         "Mark modified and flush to DB."
@@ -2727,8 +2719,9 @@ class DeckStorage(object):
         (engine, session) = DeckStorage._attach(path, create, pool)
         s = session()
         if create:
+            DeckStorage._addTables(engine)
             metadata.create_all(engine)
-            initTagTables(engine)
+            DeckStorage._addConfig(engine)
             deck = DeckStorage._init(s)
             updateIndices(engine)
             engine.execute("analyze")
@@ -2736,6 +2729,7 @@ class DeckStorage(object):
             ver = upgradeSchema(engine, s)
             # add any possibly new tables if we're upgrading
             if ver < DECK_VERSION:
+                DeckStorage._addTables(engine)
                 metadata.create_all(engine)
             deck = s.query(Deck).get(1)
             if not deck:
@@ -2761,7 +2755,6 @@ class DeckStorage(object):
                     type="inuse")
             else:
                 raise e
-
     _getDeck = staticmethod(_getDeck)

     def _attach(path, create, pool=True):
@@ -2794,6 +2787,47 @@ class DeckStorage(object):
         return deck
     _init = staticmethod(_init)

+    def _addConfig(s):
+        "Add a default group & config."
+        s.execute("""
+insert into groupConfig values (1, :t, :name, :conf)""",
+                  t=time.time(), name=_("Default Config"),
+                  conf=simplejson.dumps(anki.groups.defaultConf))
+        s.execute("""
+insert into groups values (1, :t, "Default", 1)""",
+                  t=time.time())
+    _addConfig = staticmethod(_addConfig)
+
+    def _addTables(s):
+        "Add tables with syntax that older sqlalchemy versions don't support."
+        sql = [
+            """
+create table tags (
+    id integer not null,
+    name text not null collate nocase unique,
+    priority integer not null default 0,
+    primary key(id))""",
+            """
+create table cardTags (
+    cardId integer not null,
+    tagId integer not null,
+    type integer not null,
+    primary key(tagId, cardId))""",
+            """
+create table groups (
+    id integer primary key autoincrement,
+    modified integer not null,
+    name text not null collate nocase unique,
+    confId integer not null)"""]
+        for table in sql:
+            try:
+                s.execute(table)
+            except:
+                pass
+
+    _addTables = staticmethod(_addTables)
+
     def Deck(path, backup=True, pool=True, minimal=False):
         "Create a new deck or attach to an existing one. Path should be unicode."
         path = os.path.abspath(path)
@@ -145,12 +145,3 @@ class Fact(object):
             self.values()))
         for card in self.cards:
             card.rebuildQA(deck)
-
-# Fact deletions
-##########################################################################
-
-factsDeletedTable = Table(
-    'factsDeleted', metadata,
-    Column('factId', Integer, ForeignKey("facts.id"),
-           nullable=False),
-    Column('deletedTime', Float, nullable=False))
anki/graves.py (new file, 34 lines)

@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# Copyright: Damien Elmes <anki@ichi2.net>
+# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
+
+# FIXME:
+# - check if we have to int(time)
+# - port all the code referencing the old tables
+
+import time
+from anki.db import *
+
+FACT = 0
+CARD = 1
+MODEL = 2
+MEDIA = 3
+GROUP = 4
+GROUPCONFIG = 5
+
+gravestonesTable = Table(
+    'gravestones', metadata,
+    Column('delTime', Integer, nullable=False),
+    Column('objectId', Integer, nullable=False),
+    Column('type', Integer, nullable=False))
+
+def registerOne(db, type, id):
+    db.statement("insert into gravestones values (:t, :id, :ty)",
+                 t=time.time(), id=id, ty=type)
+
+def registerMany(db, type, ids):
+    db.statements("insert into gravestones values (:t, :id, :ty)",
+                  [{'t': time.time(), 'id': x, 'ty': type} for x in ids])
+
+def forgetAll(db):
+    db.statement("delete from gravestones")
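A short usage sketch (assumed, not part of the patch) of how the deletion helpers above are meant to be used: deck.py in this diff calls registerMany()/registerOne() whenever objects are removed, and a sync client could later read the pending gravestones back. The onFactsDeleted/pendingDeletions names and the 'since' cutoff are illustrative only; the db wrapper is the same one used throughout the diff.

import anki.graves as graves

def onFactsDeleted(db, ids):
    # mirror of the call deck.deleteFacts() now makes in this commit
    graves.registerMany(db, graves.FACT, ids)

def pendingDeletions(db, since):
    # hypothetical: everything deleted since the last sync, typed by object
    return db.all(
        "select type, objectId from gravestones where delTime > :t", t=since)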
anki/groups.py (new file, 54 lines)

@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright: Damien Elmes <anki@ichi2.net>
+# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
+
+import simplejson, time
+from anki.db import *
+
+groupsTable = Table(
+    'groups', metadata,
+    Column('id', Integer, primary_key=True),
+    Column('modified', Float, nullable=False, default=time.time),
+    Column('name', UnicodeText, nullable=False),
+    Column('confId', Integer, nullable=False))
+
+# maybe define a random cutoff at say +/-30% which controls exit interval
+# variation - 30% of 1 day is 0.7 or 1.3 so always 1 day; 30% of 4 days is
+# 2.8-5.2, so any time from 3-5 days is acceptable
+
+defaultConf = {
+    'new': {
+        'delays': [0.5, 3, 10],
+        'ints': [1, 7, 4],
+    },
+    'lapse': {
+        'delays': [0.5, 3, 10],
+        'ints': [1, 7, 4],
+        'mult': 0
+    },
+    'initialFactor': 2.5,
+    'suspendLeeches': True,
+    'leechFails': 16,
+}
+
+groupConfigTable = Table(
+    'groupConfig', metadata,
+    Column('id', Integer, primary_key=True),
+    Column('modified', Float, nullable=False, default=time.time),
+    Column('name', UnicodeText, nullable=False),
+    Column('config', UnicodeText, nullable=False,
+           default=unicode(simplejson.dumps(defaultConf))))
+
+class GroupConfig(object):
+    def __init__(self, name):
+        self.name = name
+        self.id = genID()
+        self.config = defaultConf
+
+    def save(self):
+        self._config = simplejson.dumps(self.config)
+        self.modified = time.time()
+
+mapper(GroupConfig, groupConfigTable, properties={
+    '_config': groupConfigTable.c.config,
+})
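A sketch of the intended sharing model (assumed, grounded in the tables above): any number of groups can point at the same groupConfig row and so inherit one set of scheduling parameters, with edits going through GroupConfig.save(), which serialises the dict back into the config column. The addGroup helper below is hypothetical; config id 1 is the default row that DeckStorage._addConfig() inserts elsewhere in this diff.

import time

def addGroup(db, name, confId=1):
    # hypothetical helper: groups.id autoincrements, so only the remaining
    # columns are supplied; many groups may reuse confId 1
    db.statement("insert into groups (modified, name, confId) "
                 "values (:t, :name, :conf)",
                 t=time.time(), name=name, conf=confId)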
@@ -22,12 +22,6 @@ mediaTable = Table(
     Column('modified', Float, nullable=False),
     Column('chksum', UnicodeText, nullable=False, default=u""))

-mediaDeletedTable = Table(
-    'mediaDeleted', metadata,
-    Column('mediaId', Integer, ForeignKey("cards.id"),
-           nullable=False),
-    Column('deletedTime', Float, nullable=False))
-
 # File handling
 ##########################################################################

@@ -153,37 +153,13 @@ def formatQA(cid, mid, fact, tags, cm, deck):
 # Model table
 ##########################################################################

-# maybe define a random cutoff at say +/-30% which controls exit interval
-# variation - 30% of 1 day is 0.7 or 1.3 so always 1 day; 30% of 4 days is
-# 2.8-5.2, so any time from 3-5 days is acceptable
-
-# collapse time should be bigger than default failSchedule
-
-# need to think about failed interval handling - if the final drill is
-# optional, what intervals should the default be? 3 days or more if cards are
-# over that interval range? and what about failed mature bonus?
-
-defaultConf = {
-    'new': {
-        'delays': [0.5, 3, 10],
-        'ints': [1, 7, 4],
-    },
-    'lapse': {
-        'delays': [0.5, 3, 10],
-        'ints': [1, 7, 4],
-        'mult': 0
-    },
-    'initialFactor': 2.5,
-}
-
 modelsTable = Table(
     'models', metadata,
     Column('id', Integer, primary_key=True),
-    Column('created', Float, nullable=False, default=time.time),
     Column('modified', Float, nullable=False, default=time.time),
     Column('name', UnicodeText, nullable=False),
-    Column('config', UnicodeText, nullable=False,
-           default=unicode(simplejson.dumps(defaultConf))),
+    # currently unused
+    Column('config', UnicodeText, nullable=False, default=u"")
 )

 class Model(object):
@@ -219,12 +195,3 @@ mapper(Model, modelsTable, properties={
         order_by=[cardModelsTable.c.ordinal],
         cascade="all, delete-orphan"),
 })
-
-# Model deletions
-##########################################################################
-
-modelsDeletedTable = Table(
-    'modelsDeleted', metadata,
-    Column('modelId', Integer, ForeignKey("models.id"),
-           nullable=False),
-    Column('deletedTime', Float, nullable=False))
@@ -38,7 +38,7 @@ class Scheduler(object):
         return card

     def reset(self):
-        self.modelConfigs = {}
+        self.resetConfig()
         self.resetLearn()
         self.resetReview()
         self.resetNew()
@@ -474,13 +474,18 @@ and queue between 1 and 2""",
     # Tools
     ##########################################################################

+    def resetConfig(self):
+        "Update group config cache."
+        self.groupConfigs = dict(self.db.all("select id, confId from groups"))
+        self.configCache = {}
+
     def configForCard(self, card):
-        mid = card.modelId
-        if not mid in self.modelConfigs:
-            self.modelConfigs[mid] = simplejson.loads(
-                self.db.scalar("select config from models where id = :id",
-                               id=mid))
-        return self.modelConfigs[mid]
+        id = self.groupConfigs[card.groupId]
+        if id not in self.configCache:
+            self.configCache[id] = simplejson.loads(
+                self.db.scalar("select config from groupConfig where id = :id",
+                               id=id))
+        return self.configCache[id]

     def resetSchedBuried(self):
         "Put temporarily suspended cards back into play."
anki/tags.py (20 lines changed)

@@ -5,26 +5,6 @@
 from anki.db import *

 # Type: 0=fact, 1=model, 2=template
-# Priority: -100 to 100
-
-# older sqlalchemy versions didn't support collate for sqlite, so we do it
-# manually
-def initTagTables(s):
-    try:
-        s.execute("""
-create table tags (
-    id integer not null,
-    name text not null collate nocase unique,
-    priority integer not null default 0,
-    primary key(id))""")
-        s.execute("""
-create table cardTags (
-    cardId integer not null,
-    tagId integer not null,
-    type integer not null,
-    primary key(tagId, cardId))""")
-    except:
-        pass

 def tagId(s, tag, create=True):
     "Return ID for tag, creating if necessary."
@@ -8,7 +8,6 @@ import time, simplejson
 from anki.db import *
 from anki.lang import _
 from anki.media import rebuildMediaDir
-from anki.tags import initTagTables

 def moveTable(s, table):
     sql = s.scalar(
@@ -37,17 +36,16 @@ def upgradeSchema(engine, s):
         import cards
         metadata.create_all(engine, tables=[cards.cardsTable])
         s.execute("""
-insert into cards select id, factId,
-(select modelId from facts where facts.id = cards2.factId),
-cardModelId, created, modified,
-question, answer, ordinal, 0, relativeDelay, type, due, interval,
-factor, reps, successive, noCount, 0, 0 from cards2""")
+insert into cards select id, factId, 1, cardModelId, modified, question,
+answer, ordinal, 0, relativeDelay, type, due, interval, factor, reps,
+successive, noCount, 0, 0 from cards2""")
         s.execute("drop table cards2")
         # tags
         ###########
         moveTable(s, "tags")
         moveTable(s, "cardTags")
-        initTagTables(s)
+        import deck
+        deck.DeckStorage._addTables(engine)
         s.execute("insert or ignore into tags select id, tag, 0 from tags2")
         s.execute("""
 insert or ignore into cardTags select cardId, tagId, src from cardTags2""")
@@ -80,8 +78,7 @@ originalPath from media2""")
         import models
         metadata.create_all(engine, tables=[models.modelsTable])
         s.execute("""
-insert or ignore into models select id, created, modified, name,
-:c from models2""", {'c':simplejson.dumps(models.defaultConf)})
+insert or ignore into models select id, modified, name, "" from models2""")
         s.execute("drop table models2")

     return ver
@@ -149,13 +146,7 @@ create index if not exists ix_fields_chksum on fields (chksum)""")
 create index if not exists ix_media_chksum on media (chksum)""")
     # deletion tracking
     db.execute("""
-create index if not exists ix_cardsDeleted_cardId on cardsDeleted (cardId)""")
-    db.execute("""
-create index if not exists ix_modelsDeleted_modelId on modelsDeleted (modelId)""")
-    db.execute("""
-create index if not exists ix_factsDeleted_factId on factsDeleted (factId)""")
-    db.execute("""
-create index if not exists ix_mediaDeleted_factId on mediaDeleted (mediaId)""")
+create index if not exists ix_gravestones_delTime on gravestones (delTime)""")
     # tags
     db.execute("""
 create index if not exists ix_cardTags_cardId on cardTags (cardId)""")
@@ -209,12 +200,17 @@ cast(min(thinkingTime, 60)*1000 as int), 0 from reviewHistory""")
     # remove queueDue as it's become dynamic, and type index
     deck.db.statement("drop index if exists ix_cards_queueDue")
     deck.db.statement("drop index if exists ix_cards_type")
+    # remove old deleted tables
+    for t in ("cards", "facts", "models", "media"):
+        deck.db.statement("drop table if exists %sDeleted" % t)
     # finally, update indices & optimize
     updateIndices(deck.db)
     # setup limits & config for dynamicIndices()
     deck.limits = simplejson.loads(deck._limits)
     deck.config = simplejson.loads(deck._config)
+    # add default config
+    import deck as deckMod
+    deckMod.DeckStorage._addConfig(deck.engine)
     deck.updateDynamicIndices()
     deck.db.execute("vacuum")