add new 'groups' concept, refactor deletions
Users who want to study small subsections at one time (eg, "lesson 14") are currently best served by creating lots of little decks. This is because:

- selective study is a bit cumbersome to switch between
- the graphs and statistics are for the entire deck
- selective study can be slow on mobile devices
- when the list of cards to hide/show is big, or when there are many due cards, performance can suffer
- scheduling can only be configured per deck

Groups are intended to address the above problems. All cards start off in the same group, but they can have their group changed. Unlike tags, cards can only be a member of a single group at one time. This allows us to divide the deck up into a non-overlapping set of cards, which will make things like showing due counts for a single category considerably cheaper. The user interface might want to show something like a deck browser for decks that have more than one group, showing due counts and allowing people to study each group individually, or to study all at once.

Instead of storing the scheduling config in the deck or the model, we move the scheduling into a separate config table and link it to the groups table. That way a user can have multiple groups that all share the same scheduling information if they want. And deletion tracking is now in a single table.
This commit is contained in:
parent abd665e48f
commit bb79b0e17c
10 changed files with 174 additions and 134 deletions
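
Before diving into the diff, here is a minimal standalone sketch of the layout the message describes: each card points at exactly one group, and each group points at a shared scheduling config stored as JSON. The table and column names follow the diff below, but the use of sqlite3/json and the config_for_card helper are illustrative assumptions rather than the real anki API.

import json, sqlite3, time

# simplified versions of the tables this commit introduces (the real ones are
# declared via sqlalchemy in the diff; anki itself uses simplejson)
db = sqlite3.connect(":memory:")
db.executescript("""
create table groupConfig (id integer primary key, modified float, name text, config text);
create table groups (id integer primary key autoincrement, modified integer, name text, confId integer);
create table cards (id integer primary key, groupId integer not null default 1);
""")

defaultConf = {"new": {"delays": [0.5, 3, 10], "ints": [1, 7, 4]},
               "lapse": {"delays": [0.5, 3, 10], "ints": [1, 7, 4], "mult": 0},
               "initialFactor": 2.5, "suspendLeeches": True, "leechFails": 16}

# mirrors DeckStorage._addConfig() below: one default config, one default group
db.execute("insert into groupConfig values (1, ?, 'Default Config', ?)",
           (time.time(), json.dumps(defaultConf)))
db.execute("insert into groups values (1, ?, 'Default', 1)", (time.time(),))

# every card belongs to exactly one group; group 1 is the default
db.execute("insert into cards (id) values (1)")

def config_for_card(card_id):
    # hypothetical helper: card -> group -> shared scheduling config
    (conf,) = db.execute("""
select gc.config from cards c
join groups g on g.id = c.groupId
join groupConfig gc on gc.id = g.confId
where c.id = ?""", (card_id,)).fetchone()
    return json.loads(conf)

print(config_for_card(1)["leechFails"])  # -> 16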
@@ -17,6 +17,7 @@ MAX_TIMER = 60
# Type: 0=learning, 1=due, 2=new
# Queue: 0=learning, 1=due, 2=new
#        -1=suspended, -2=user buried, -3=sched buried
# Group: scheduling group
# Ordinal: card template # for fact
# Flags: unused; reserved for future use

@@ -24,11 +25,10 @@ cardsTable = Table(
    'cards', metadata,
    Column('id', Integer, primary_key=True),
    Column('factId', Integer, ForeignKey("facts.id"), nullable=False),
    Column('modelId', Integer, ForeignKey("models.id"), nullable=False),
    Column('groupId', Integer, nullable=False, default=1),
    Column('cardModelId', Integer, ForeignKey("cardModels.id"), nullable=False),
    # general
    Column('created', Float, nullable=False, default=time.time),
    Column('modified', Float, nullable=False, default=time.time),
    # general
    Column('question', UnicodeText, nullable=False, default=u""),
    Column('answer', UnicodeText, nullable=False, default=u""),
    Column('ordinal', Integer, nullable=False),

@@ -50,15 +50,14 @@ cardsTable = Table(
class Card(object):

    def __init__(self, fact=None, cardModel=None, created=None):
    # FIXME: this needs tidying up
    def __init__(self, fact=None, cardModel=None, due=None):
        self.id = genID()
        self.modified = time.time()
        if created:
            self.created = created
            self.due = created
        if due:
            self.due = due
        else:
            self.due = self.modified
        self.position = self.due
        if fact:
            self.fact = fact
            self.modelId = fact.modelId

@@ -145,9 +144,8 @@ class Card(object):
            return
        (self.id,
         self.factId,
         self.modelId,
         self.groupId,
         self.cardModelId,
         self.created,
         self.modified,
         self.question,
         self.answer,

@@ -166,15 +164,11 @@ class Card(object):
        return True

    def toDB(self, s):
        # this shouldn't be used for schema changes
        s.execute("""update cards set
factId=:factId,
modelId=:modelId,
cardModelId=:cardModelId,
created=:created,
modified=:modified,
question=:question,
answer=:answer,
ordinal=:ordinal,
flags=:flags,
type=:type,
queue=:queue,

@@ -198,12 +192,3 @@ mapper(Fact, factsTable, properties={
    'model': relation(Model),
    'fields': relation(Field, backref="fact", order_by=Field.ordinal),
})

# Card deletions
##########################################################################

cardsDeletedTable = Table(
    'cardsDeleted', metadata,
    Column('cardId', Integer, ForeignKey("cards.id"),
           nullable=False),
    Column('deletedTime', Float, nullable=False))
anki/deck.py (66 lines changed)

@@ -14,7 +14,7 @@ from anki.utils import parseTags, tidyHTML, genID, ids2str, hexifyID, \
from anki.revlog import logReview
from anki.models import Model, CardModel, formatQA
from anki.fonts import toPlatformFont
from anki.tags import initTagTables, tagIds, tagId
from anki.tags import tagIds, tagId
from operator import itemgetter
from itertools import groupby
from anki.hooks import runHook, hookEmpty

@@ -27,7 +27,7 @@ from anki.consts import *
import anki.latex # sets up hook

# ensure all the DB metadata in other files is loaded before proceeding
import anki.models, anki.facts, anki.cards, anki.media
import anki.models, anki.facts, anki.cards, anki.media, anki.groups, anki.graves

# Selective study and new card limits. These vars are necessary to determine
# counts even on a minimum deck load, and thus are separate from the rest of

@@ -52,8 +52,6 @@ defaultConf = {
    'collapseTime': 600,
    'sessionRepLimit': 0,
    'sessionTimeLimit': 600,
    'suspendLeeches': True,
    'leechFails': 16,
    'currentModelId': None,
    'mediaURL': "",
    'latexPre': """\

@@ -550,8 +548,7 @@ where factId = :fid and cardModelId = :cmid""",
        strids = ids2str(ids)
        self.db.statement("delete from facts where id in %s" % strids)
        self.db.statement("delete from fields where factId in %s" % strids)
        data = [{'id': id, 'time': now} for id in ids]
        self.db.statements("insert into factsDeleted values (:id, :time)", data)
        anki.graves.registerMany(self.db, anki.graves.FACT, ids)
        self.setModified()

    def deleteDanglingFacts(self):

@@ -611,8 +608,7 @@ where facts.id not in (select distinct factId from cards)""")
        # drop from cards
        self.db.statement("delete from cards where id in %s" % strids)
        # note deleted
        data = [{'id': id, 'time': now} for id in ids]
        self.db.statements("insert into cardsDeleted values (:id, :time)", data)
        anki.graves.registerMany(self.db, anki.graves.CARD, ids)
        # gather affected tags
        tags = self.db.column0(
            "select tagId from cardTags where cardId in %s" %

@@ -670,8 +666,7 @@ facts.id = cards.factId""", id=model.id))
        self.db.flush()
        if self.currentModel == model:
            self.currentModel = self.models[0]
        self.db.statement("insert into modelsDeleted values (:id, :time)",
                          id=model.id, time=time.time())
        anki.graves.registerOne(self.db, anki.graves.MODEL, model.id)
        self.flushMod()
        self.refreshSession()
        self.setModified()

@@ -2204,10 +2199,7 @@ Return new path, relative to media dir."""

    def setSchemaModified(self):
        self.schemaMod = time.time()
        # since we guarantee a full sync to all clients, this is a good time
        # to forget old gravestones
        for k in ("cards", "facts", "models", "media"):
            self.db.statement("delete from %sDeleted" % k)
        anki.graves.forgetAll(self.db)

    def flushMod(self):
        "Mark modified and flush to DB."

@@ -2727,8 +2719,9 @@ class DeckStorage(object):
        (engine, session) = DeckStorage._attach(path, create, pool)
        s = session()
        if create:
            DeckStorage._addTables(engine)
            metadata.create_all(engine)
            initTagTables(engine)
            DeckStorage._addConfig(engine)
            deck = DeckStorage._init(s)
            updateIndices(engine)
            engine.execute("analyze")

@@ -2736,6 +2729,7 @@ class DeckStorage(object):
            ver = upgradeSchema(engine, s)
            # add any possibly new tables if we're upgrading
            if ver < DECK_VERSION:
                DeckStorage._addTables(engine)
                metadata.create_all(engine)
            deck = s.query(Deck).get(1)
            if not deck:

@@ -2761,7 +2755,6 @@ class DeckStorage(object):
                type="inuse")
            else:
                raise e

    _getDeck = staticmethod(_getDeck)

    def _attach(path, create, pool=True):

@@ -2794,6 +2787,47 @@ class DeckStorage(object):
        return deck
    _init = staticmethod(_init)

    def _addConfig(s):
        "Add a default group & config."
        s.execute("""
insert into groupConfig values (1, :t, :name, :conf)""",
                  t=time.time(), name=_("Default Config"),
                  conf=simplejson.dumps(anki.groups.defaultConf))
        s.execute("""
insert into groups values (1, :t, "Default", 1)""",
                  t=time.time())
    _addConfig = staticmethod(_addConfig)

    def _addTables(s):
        "Add tables with syntax that older sqlalchemy versions don't support."
        sql = [
            """
create table tags (
id integer not null,
name text not null collate nocase unique,
priority integer not null default 0,
primary key(id))""",
            """
create table cardTags (
cardId integer not null,
tagId integer not null,
type integer not null,
primary key(tagId, cardId))""",
            """
create table groups (
id integer primary key autoincrement,
modified integer not null,
name text not null collate nocase unique,
confId integer not null)"""
        ]
        for table in sql:
            try:
                s.execute(table)
            except:
                pass

    _addTables = staticmethod(_addTables)

    def Deck(path, backup=True, pool=True, minimal=False):
        "Create a new deck or attach to an existing one. Path should be unicode."
        path = os.path.abspath(path)
@@ -145,12 +145,3 @@ class Fact(object):
                self.values()))
        for card in self.cards:
            card.rebuildQA(deck)

# Fact deletions
##########################################################################

factsDeletedTable = Table(
    'factsDeleted', metadata,
    Column('factId', Integer, ForeignKey("facts.id"),
           nullable=False),
    Column('deletedTime', Float, nullable=False))
anki/graves.py (34 lines, new file)

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html

# FIXME:
# - check if we have to int(time)
# - port all the code referencing the old tables

import time
from anki.db import *

FACT = 0
CARD = 1
MODEL = 2
MEDIA = 3
GROUP = 4
GROUPCONFIG = 5

gravestonesTable = Table(
    'gravestones', metadata,
    Column('delTime', Integer, nullable=False),
    Column('objectId', Integer, nullable=False),
    Column('type', Integer, nullable=False))

def registerOne(db, type, id):
    db.statement("insert into gravestones values (:t, :id, :ty)",
                 t=time.time(), id=id, ty=type)

def registerMany(db, type, ids):
    db.statements("insert into gravestones values (:t, :id, :ty)",
                  [{'t':time.time(), 'id':x, 'ty':type} for x in ids])

def forgetAll(db):
    db.statement("delete from gravestones")
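
A rough usage sketch of the helpers above, assuming plain sqlite3 as a stand-in for anki's db wrapper: every kind of deletion now lands in the single gravestones table, keyed by a type constant. The registerMany/forgetAll bodies below mirror the diff but are rewritten against the standard library, so treat them as an illustration rather than the real module.

import sqlite3, time

FACT, CARD, MODEL, MEDIA, GROUP, GROUPCONFIG = range(6)

db = sqlite3.connect(":memory:")
db.execute("create table gravestones (delTime integer, objectId integer, type integer)")

def registerMany(db, type, ids):
    # one row per deleted object, whatever its kind (signature mirrors the diff)
    db.executemany("insert into gravestones values (:t, :id, :ty)",
                   [{"t": int(time.time()), "id": x, "ty": type} for x in ids])

def forgetAll(db):
    # called when a schema change forces a full sync anyway
    db.execute("delete from gravestones")

# deleting facts and cards now funnels into the same table
registerMany(db, FACT, [101, 102])
registerMany(db, CARD, [201])
print(db.execute("select type, count(*) from gravestones group by type").fetchall())
# [(0, 2), (1, 1)]: two fact gravestones, one card gravestone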
anki/groups.py (54 lines, new file)

@@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html

import simplejson, time
from anki.db import *

groupsTable = Table(
    'groups', metadata,
    Column('id', Integer, primary_key=True),
    Column('modified', Float, nullable=False, default=time.time),
    Column('name', UnicodeText, nullable=False),
    Column('confId', Integer, nullable=False))

# maybe define a random cutoff at say +/-30% which controls exit interval
# variation - 30% of 1 day is 0.7 or 1.3 so always 1 day; 30% of 4 days is
# 2.8-5.2, so any time from 3-5 days is acceptable

defaultConf = {
    'new': {
        'delays': [0.5, 3, 10],
        'ints': [1, 7, 4],
    },
    'lapse': {
        'delays': [0.5, 3, 10],
        'ints': [1, 7, 4],
        'mult': 0
    },
    'initialFactor': 2.5,
    'suspendLeeches': True,
    'leechFails': 16,
}

groupConfigTable = Table(
    'groupConfig', metadata,
    Column('id', Integer, primary_key=True),
    Column('modified', Float, nullable=False, default=time.time),
    Column('name', UnicodeText, nullable=False),
    Column('config', UnicodeText, nullable=False,
           default=unicode(simplejson.dumps(defaultConf))))

class GroupConfig(object):
    def __init__(self, name):
        self.name = name
        self.id = genID()
        self.config = defaultConf

    def save(self):
        self._config = simplejson.dumps(self.config)
        self.modified = time.time()

mapper(GroupConfig, groupConfigTable, properties={
    '_config': groupConfigTable.c.config,
})
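
The "+/-30%" comment above is only an idea at this point; the sketch below works through its arithmetic. The fuzz_window name and the rounding choices are assumptions for illustration, not anything defined in the diff.

import math

def fuzz_window(interval_days, variation=0.30):
    "Return the (lo, hi) whole-day window allowed by +/-variation."
    lo = interval_days * (1 - variation)
    hi = interval_days * (1 + variation)
    return max(1, int(math.ceil(lo))), max(1, int(math.floor(hi)))

print(fuzz_window(1))  # (1, 1): 0.7..1.3 always rounds to 1 day
print(fuzz_window(4))  # (3, 5): 2.8..5.2 allows any of 3-5 days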
@@ -22,12 +22,6 @@ mediaTable = Table(
    Column('modified', Float, nullable=False),
    Column('chksum', UnicodeText, nullable=False, default=u""))

mediaDeletedTable = Table(
    'mediaDeleted', metadata,
    Column('mediaId', Integer, ForeignKey("cards.id"),
           nullable=False),
    Column('deletedTime', Float, nullable=False))

# File handling
##########################################################################
@@ -153,37 +153,13 @@ def formatQA(cid, mid, fact, tags, cm, deck):
# Model table
##########################################################################

# maybe define a random cutoff at say +/-30% which controls exit interval
# variation - 30% of 1 day is 0.7 or 1.3 so always 1 day; 30% of 4 days is
# 2.8-5.2, so any time from 3-5 days is acceptable

# collapse time should be bigger than default failSchedule

# need to think about failed interval handling - if the final drill is
# optional, what intervals should the default be? 3 days or more if cards are
# over that interval range? and what about failed mature bonus?

defaultConf = {
    'new': {
        'delays': [0.5, 3, 10],
        'ints': [1, 7, 4],
    },
    'lapse': {
        'delays': [0.5, 3, 10],
        'ints': [1, 7, 4],
        'mult': 0
    },
    'initialFactor': 2.5,
}

modelsTable = Table(
    'models', metadata,
    Column('id', Integer, primary_key=True),
    Column('created', Float, nullable=False, default=time.time),
    Column('modified', Float, nullable=False, default=time.time),
    Column('name', UnicodeText, nullable=False),
    Column('config', UnicodeText, nullable=False,
           default=unicode(simplejson.dumps(defaultConf))),
    # currently unused
    Column('config', UnicodeText, nullable=False, default=u"")
)

class Model(object):

@@ -219,12 +195,3 @@ mapper(Model, modelsTable, properties={
    order_by=[cardModelsTable.c.ordinal],
    cascade="all, delete-orphan"),
})

# Model deletions
##########################################################################

modelsDeletedTable = Table(
    'modelsDeleted', metadata,
    Column('modelId', Integer, ForeignKey("models.id"),
           nullable=False),
    Column('deletedTime', Float, nullable=False))
@@ -38,7 +38,7 @@ class Scheduler(object):
        return card

    def reset(self):
        self.modelConfigs = {}
        self.resetConfig()
        self.resetLearn()
        self.resetReview()
        self.resetNew()

@@ -474,13 +474,18 @@ and queue between 1 and 2""",
    # Tools
    ##########################################################################

    def resetConfig(self):
        "Update group config cache."
        self.groupConfigs = dict(self.db.all("select id, confId from groups"))
        self.configCache = {}

    def configForCard(self, card):
        mid = card.modelId
        if not mid in self.modelConfigs:
            self.modelConfigs[mid] = simplejson.loads(
                self.db.scalar("select config from models where id = :id",
                               id=mid))
        return self.modelConfigs[mid]
        id = self.groupConfigs[card.groupId]
        if id not in self.configCache:
            self.configCache[id] = simplejson.loads(
                self.db.scalar("select config from groupConfig where id = :id",
                               id=id))
        return self.configCache[id]

    def resetSchedBuried(self):
        "Put temporarily suspended cards back into play."
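
A standalone sketch of the caching pattern resetConfig/configForCard use above: one query builds the group-to-confId map, and each JSON config string is parsed at most once, so groups that share a config also share the parsed dict. The ConfigCache class and the sqlite3/json calls are illustrative stand-ins for the scheduler and anki's db layer.

import json, sqlite3

db = sqlite3.connect(":memory:")
db.executescript("""
create table groupConfig (id integer primary key, config text);
create table groups (id integer primary key, confId integer);
""")
db.execute("insert into groupConfig values (1, ?)", (json.dumps({"leechFails": 16}),))
db.executemany("insert into groups values (?, 1)", [(1,), (2,)])  # two groups, one config

class ConfigCache(object):
    def __init__(self, db):
        self.db = db
        self.resetConfig()

    def resetConfig(self):
        "Update group config cache (mirrors Scheduler.resetConfig)."
        self.groupConfigs = dict(self.db.execute("select id, confId from groups"))
        self.configCache = {}

    def configForGroup(self, groupId):
        cid = self.groupConfigs[groupId]
        if cid not in self.configCache:
            (raw,) = self.db.execute(
                "select config from groupConfig where id = ?", (cid,)).fetchone()
            self.configCache[cid] = json.loads(raw)
        return self.configCache[cid]

cache = ConfigCache(db)
# both groups resolve to the same parsed dict, fetched and parsed only once
print(cache.configForGroup(1) is cache.configForGroup(2))  # True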
anki/tags.py (20 lines changed)

@@ -5,26 +5,6 @@
from anki.db import *

# Type: 0=fact, 1=model, 2=template
# Priority: -100 to 100

# older sqlalchemy versions didn't support collate for sqlite, so we do it
# manually
def initTagTables(s):
    try:
        s.execute("""
create table tags (
id integer not null,
name text not null collate nocase unique,
priority integer not null default 0,
primary key(id))""")
        s.execute("""
create table cardTags (
cardId integer not null,
tagId integer not null,
type integer not null,
primary key(tagId, cardId))""")
    except:
        pass

def tagId(s, tag, create=True):
    "Return ID for tag, creating if necessary."
@@ -8,7 +8,6 @@ import time, simplejson
from anki.db import *
from anki.lang import _
from anki.media import rebuildMediaDir
from anki.tags import initTagTables

def moveTable(s, table):
    sql = s.scalar(

@@ -37,17 +36,16 @@ def upgradeSchema(engine, s):
        import cards
        metadata.create_all(engine, tables=[cards.cardsTable])
        s.execute("""
insert into cards select id, factId,
(select modelId from facts where facts.id = cards2.factId),
cardModelId, created, modified,
question, answer, ordinal, 0, relativeDelay, type, due, interval,
factor, reps, successive, noCount, 0, 0 from cards2""")
insert into cards select id, factId, 1, cardModelId, modified, question,
answer, ordinal, 0, relativeDelay, type, due, interval, factor, reps,
successive, noCount, 0, 0 from cards2""")
        s.execute("drop table cards2")
        # tags
        ###########
        moveTable(s, "tags")
        moveTable(s, "cardTags")
        initTagTables(s)
        import deck
        deck.DeckStorage._addTables(engine)
        s.execute("insert or ignore into tags select id, tag, 0 from tags2")
        s.execute("""
insert or ignore into cardTags select cardId, tagId, src from cardTags2""")

@@ -80,8 +78,7 @@ originalPath from media2""")
        import models
        metadata.create_all(engine, tables=[models.modelsTable])
        s.execute("""
insert or ignore into models select id, created, modified, name,
:c from models2""", {'c':simplejson.dumps(models.defaultConf)})
insert or ignore into models select id, modified, name, "" from models2""")
        s.execute("drop table models2")

    return ver

@@ -149,13 +146,7 @@ create index if not exists ix_fields_chksum on fields (chksum)""")
create index if not exists ix_media_chksum on media (chksum)""")
    # deletion tracking
    db.execute("""
create index if not exists ix_cardsDeleted_cardId on cardsDeleted (cardId)""")
    db.execute("""
create index if not exists ix_modelsDeleted_modelId on modelsDeleted (modelId)""")
    db.execute("""
create index if not exists ix_factsDeleted_factId on factsDeleted (factId)""")
    db.execute("""
create index if not exists ix_mediaDeleted_factId on mediaDeleted (mediaId)""")
create index if not exists ix_gravestones_delTime on gravestones (delTime)""")
    # tags
    db.execute("""
create index if not exists ix_cardTags_cardId on cardTags (cardId)""")

@@ -209,12 +200,17 @@ cast(min(thinkingTime, 60)*1000 as int), 0 from reviewHistory""")
    # remove queueDue as it's become dynamic, and type index
    deck.db.statement("drop index if exists ix_cards_queueDue")
    deck.db.statement("drop index if exists ix_cards_type")

    # remove old deleted tables
    for t in ("cards", "facts", "models", "media"):
        deck.db.statement("drop table if exists %sDeleted" % t)
    # finally, update indices & optimize
    updateIndices(deck.db)
    # setup limits & config for dynamicIndices()
    deck.limits = simplejson.loads(deck._limits)
    deck.config = simplejson.loads(deck._config)
    # add default config
    import deck as deckMod
    deckMod.DeckStorage._addConfig(deck.engine)

    deck.updateDynamicIndices()
    deck.db.execute("vacuum")