rename deck.s to a more understandable deck.db; keep s for compat

Damien Elmes 2011-02-19 16:27:28 +09:00
parent b9cf5ad85d
commit f828393de3
14 changed files with 459 additions and 468 deletions

File diff suppressed because it is too large
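The suppressed diff is presumably the main deck module, where the rename itself happens. Going by the commit message ("keep s for compat"), the shim would keep s as an alias of the new db attribute. A minimal sketch of that idea only; everything here beyond the s/db names is an assumption, not the suppressed code:

class Deck(object):
    def __init__(self, session):
        # new, clearer name for the SQL session wrapper
        self.db = session
        # legacy alias: old plugins and call sites using deck.s keep
        # working, since both names point at the same object
        self.s = self.db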

@@ -49,10 +49,10 @@ class Exporter(object):
if self.limitCardIds:
return self.limitCardIds
if not self.limitTags:
cards = self.deck.s.column0("select id from cards")
cards = self.deck.db.column0("select id from cards")
else:
d = tagIds(self.deck.s, self.limitTags, create=False)
cards = self.deck.s.column0(
d = tagIds(self.deck.db, self.limitTags, create=False)
cards = self.deck.db.column0(
"select cardId from cardTags where tagid in %s" %
ids2str(d.values()))
self.count = len(cards)
@@ -84,7 +84,7 @@ class AnkiExporter(Exporter):
client.setServer(server)
client.localTime = self.deck.modified
client.remoteTime = 0
self.deck.s.flush()
self.deck.db.flush()
# set up a custom change list and sync
lsum = self.localSummary()
rsum = server.summary(0)
@@ -94,9 +94,9 @@ class AnkiExporter(Exporter):
res = server.applyPayload(payload)
if not self.includeSchedulingInfo:
self.deck.updateProgress()
self.newDeck.s.statement("""
self.newDeck.db.statement("""
delete from revlog""")
self.newDeck.s.statement("""
self.newDeck.db.statement("""
update cards set
interval = 0,
lastInterval = 0,
@@ -134,25 +134,25 @@ modified = :now
self.newDeck.rebuildCounts()
self.exportedCards = self.newDeck.cardCount
self.newDeck.utcOffset = -1
self.newDeck.s.commit()
self.newDeck.db.commit()
self.newDeck.close()
self.deck.finishProgress()
def localSummary(self):
cardIds = self.cardIds()
cStrIds = ids2str(cardIds)
cards = self.deck.s.all("""
cards = self.deck.db.all("""
select id, modified from cards
where id in %s""" % cStrIds)
facts = self.deck.s.all("""
facts = self.deck.db.all("""
select facts.id, facts.modified from cards, facts where
facts.id = cards.factId and
cards.id in %s""" % cStrIds)
models = self.deck.s.all("""
models = self.deck.db.all("""
select models.id, models.modified from models, facts where
facts.modelId = models.id and
facts.id in %s""" % ids2str([f[0] for f in facts]))
media = self.deck.s.all("""
media = self.deck.db.all("""
select id, created from media""")
return {
# cards
@@ -183,13 +183,13 @@ class TextCardExporter(Exporter):
strids = ids2str(ids)
self.deck.startProgress((len(ids) + 1) / 50)
self.deck.updateProgress(_("Exporting..."))
cards = self.deck.s.all("""
cards = self.deck.db.all("""
select cards.question, cards.answer, cards.id from cards
where cards.id in %s
order by cards.created""" % strids)
self.deck.updateProgress()
if self.includeTags:
self.cardTags = dict(self.deck.s.all("""
self.cardTags = dict(self.deck.db.all("""
select cards.id, facts.tags from cards, facts
where cards.factId = facts.id
and cards.id in %s
@@ -222,7 +222,7 @@ class TextFactExporter(Exporter):
cardIds = self.cardIds()
self.deck.startProgress()
self.deck.updateProgress(_("Exporting..."))
facts = self.deck.s.all("""
facts = self.deck.db.all("""
select factId, value, facts.created from facts, fields
where
facts.id in
@@ -233,7 +233,7 @@ order by factId, ordinal""" % ids2str(cardIds))
txt = ""
self.deck.updateProgress()
if self.includeTags:
self.factTags = dict(self.deck.s.all(
self.factTags = dict(self.deck.db.all(
"select id, tags from facts where id in %s" %
ids2str([fact[0] for fact in facts])))
groups = itertools.groupby(facts, itemgetter(0))

@@ -75,8 +75,8 @@ from cards c where relativeDelay = 1 and type >= 0 and interval > 21"""
young)
mature = self.deck._cardLimit("revActive", "revInactive",
mature)
young = self.deck.s.all(young)
mature = self.deck.s.all(mature)
young = self.deck.db.all(young)
mature = self.deck.db.all(mature)
for (src, dest) in [(young, daysYoung),
(mature, daysMature)]:
for (interval, due) in src:
@@ -111,7 +111,7 @@ from cards c where relativeDelay = 1 and type >= 0 and interval > 21"""
*(int(x) for x in dr["day"].split("-")))).days, dr["reviewTime"]/60.0), dayReps))
def getDayReps(self):
return self.deck.s.all("""
return self.deck.db.all("""
select
count() as combinedNewReps,
date(time-:off, "unixepoch") as day,
@@ -238,7 +238,7 @@ group by day order by day
days = {}
fig = Figure(figsize=(self.width, self.height), dpi=self.dpi)
limit = self.endOfDay - (numdays) * 86400
res = self.deck.s.column0("select %s from cards where %s >= %f" %
res = self.deck.db.column0("select %s from cards where %s >= %f" %
(attr, attr, limit))
for r in res:
d = int((r - self.endOfDay) / 86400.0)
@@ -361,7 +361,7 @@ group by day order by day
arr = [0] * arrsize
colours = [easesNewC, easesYoungC, easesMatureC]
bars = []
eases = self.deck.s.all("""
eases = self.deck.db.all("""
select (case when rep = 1 then 0 when lastInterval <= 21 then 1 else 2 end)
as type, ease, count() from revlog group by type, ease""")
d = {}

@@ -76,7 +76,7 @@ class Importer(object):
cards = self.foreignCards()
# grab data from db
self.deck.updateProgress()
fields = self.deck.s.all("""
fields = self.deck.db.all("""
select factId, value from fields where fieldModelId = :id
and value != ''""",
id=self.updateKey[1])
@@ -123,7 +123,7 @@ and value != ''""",
'v': c.fields[index],
'chk': self.maybeChecksum(c.fields[index], fm.unique)}
for (fid, c) in upcards]
self.deck.s.execute("""
self.deck.db.execute("""
update fields set value = :v, chksum = :chk where factId = :fid
and fieldModelId = :fmid""", data)
# update tags
@@ -132,12 +132,12 @@ and fieldModelId = :fmid""", data)
data = [{'fid': fid,
't': c.fields[tagsIdx]}
for (fid, c) in upcards]
self.deck.s.execute(
self.deck.db.execute(
"update facts set tags = :t where id = :fid",
data)
# rebuild caches
self.deck.updateProgress()
cids = self.deck.s.column0(
cids = self.deck.db.column0(
"select id from cards where factId in %s" %
ids2str(fids))
self.deck.updateCardTags(cids)
@@ -238,12 +238,12 @@ The current importer only supports a single active card template. Please disable
d['created'] = tmp[0]
factCreated[d['id']] = d['created']
return d
self.deck.s.execute(factsTable.insert(),
self.deck.db.execute(factsTable.insert(),
[fudgeCreated({'modelId': self.model.id,
'tags': canonifyTags(self.tagsToAdd + " " + cards[n].tags),
'id': factIds[n]}) for n in range(len(cards))])
self.deck.factCount += len(factIds)
self.deck.s.execute("""
self.deck.db.execute("""
delete from factsDeleted
where factId in (%s)""" % ",".join([str(s) for s in factIds]))
# add all the fields
@@ -264,7 +264,7 @@ where factId in (%s)""" % ",".join([str(s) for s in factIds]))
cards[m].fields[index] or u"", fm.unique)
}
for m in range(len(cards))]
self.deck.s.execute(fieldsTable.insert(),
self.deck.db.execute(fieldsTable.insert(),
data)
# and cards
self.deck.updateProgress()
@@ -281,7 +281,7 @@ where factId in (%s)""" % ",".join([str(s) for s in factIds]))
'question': u"",
'answer': u""
},cards[m]) for m in range(len(cards))]
self.deck.s.execute(cardsTable.insert(),
self.deck.db.execute(cardsTable.insert(),
data)
self.deck.updateProgress()
self.deck.updateCardsFromFactIds(factIds)
@@ -334,7 +334,7 @@ where factId in (%s)""" % ",".join([str(s) for s in factIds]))
def getUniqueCache(self, field):
"Return a dict with all fields, to test for uniqueness."
return dict(self.deck.s.all(
return dict(self.deck.db.all(
"select value, 1 from fields where fieldModelId = :fmid",
fmid=field.id))

@@ -33,7 +33,7 @@ class Anki10Importer(Importer):
src.s.execute("update models set modified = 1")
src.s.execute("update cards set modified = 1")
src.s.execute("update media set created = 1")
self.deck.s.flush()
self.deck.db.flush()
# set up a custom change list and sync
lsum = client.summary(0)
self._clearDeleted(lsum)
@@ -57,13 +57,13 @@ class Anki10Importer(Importer):
fids = [f[0] for f in res['added-facts']['facts']]
self.deck.addTags(fids, self.tagsToAdd)
# mark import material as newly added
self.deck.s.statement(
self.deck.db.statement(
"update cards set modified = :t where id in %s" %
ids2str([x[0] for x in res['added-cards']]), t=time.time())
self.deck.s.statement(
self.deck.db.statement(
"update facts set modified = :t where id in %s" %
ids2str([x[0] for x in res['added-facts']['facts']]), t=time.time())
self.deck.s.statement(
self.deck.db.statement(
"update models set modified = :t where id in %s" %
ids2str([x['id'] for x in res['added-models']]), t=time.time())
# update total and refresh

@@ -218,7 +218,7 @@ if __name__ == '__main__':
i = DingsBumsImporter(mydeck, filename)
i.doImport()
assert 7 == i.total
mydeck.s.close()
mydeck.db.close()
print "... Finished"
sys.exit(1)

@@ -50,7 +50,7 @@ If a file with the same md5sum exists in the DB, return that.
If a file with the same name exists, return a unique name.
This does not modify the media table."""
# see if have duplicate contents
newpath = deck.s.scalar(
newpath = deck.db.scalar(
"select filename from media where originalPath = :cs",
cs=checksum(open(path, "rb").read()))
# check if this filename already exists
@@ -85,9 +85,9 @@ def uniquePath(dir, base):
def updateMediaCount(deck, file, count=1):
mdir = deck.mediaDir()
if deck.s.scalar(
if deck.db.scalar(
"select 1 from media where filename = :file", file=file):
deck.s.statement(
deck.db.statement(
"update media set size = size + :c, created = :t where filename = :file",
file=file, c=count, t=time.time())
elif count > 0:
@@ -96,18 +96,18 @@ def updateMediaCount(deck, file, count=1):
checksum(open(os.path.join(mdir, file), "rb").read()))
except:
sum = u""
deck.s.statement("""
deck.db.statement("""
insert into media (id, filename, size, created, originalPath, description)
values (:id, :file, :c, :mod, :sum, '')""",
id=genID(), file=file, c=count, mod=time.time(),
sum=sum)
def removeUnusedMedia(deck):
ids = deck.s.column0("select id from media where size = 0")
ids = deck.db.column0("select id from media where size = 0")
for id in ids:
deck.s.statement("insert into mediaDeleted values (:id, :t)",
deck.db.statement("insert into mediaDeleted values (:id, :t)",
id=id, t=time.time())
deck.s.statement("delete from media where size = 0")
deck.db.statement("delete from media where size = 0")
# String manipulation
##########################################################################
@@ -147,7 +147,7 @@ def rebuildMediaDir(deck, delete=False, dirty=True):
return (0, 0)
deck.startProgress(title=_("Check Media DB"))
# set all ref counts to 0
deck.s.statement("update media set size = 0")
deck.db.statement("update media set size = 0")
# look through cards for media references
refs = {}
normrefs = {}
@@ -155,7 +155,7 @@ def rebuildMediaDir(deck, delete=False, dirty=True):
if isinstance(s, unicode):
return unicodedata.normalize('NFD', s)
return s
for (question, answer) in deck.s.all(
for (question, answer) in deck.db.all(
"select question, answer from cards"):
for txt in (question, answer):
for f in mediaFiles(txt):
@@ -186,7 +186,7 @@ def rebuildMediaDir(deck, delete=False, dirty=True):
removeUnusedMedia(deck)
# check md5s are up to date
update = []
for (file, created, md5) in deck.s.all(
for (file, created, md5) in deck.db.all(
"select filename, created, originalPath from media"):
path = os.path.join(mdir, file)
if not os.path.exists(path):
@@ -198,13 +198,13 @@ def rebuildMediaDir(deck, delete=False, dirty=True):
if md5 != sum:
update.append({'f':file, 'sum':sum, 'c':time.time()})
if update:
deck.s.statements("""
deck.db.statements("""
update media set originalPath = :sum, created = :c where filename = :f""",
update)
# update deck and get return info
if dirty:
deck.flushMod()
nohave = deck.s.column0("select filename from media where originalPath = ''")
nohave = deck.db.column0("select filename from media where originalPath = ''")
deck.finishProgress()
return (nohave, unused)
@@ -219,7 +219,7 @@ def downloadMissing(deck):
deck.startProgress()
missing = 0
grabbed = 0
for c, (f, sum) in enumerate(deck.s.all(
for c, (f, sum) in enumerate(deck.db.all(
"select filename, originalPath from media")):
path = os.path.join(mdir, f)
if not os.path.exists(path):
@@ -247,7 +247,7 @@ def downloadRemote(deck):
mdir = deck.mediaDir(create=True)
refs = {}
deck.startProgress()
for (question, answer) in deck.s.all(
for (question, answer) in deck.db.all(
"select question, answer from cards"):
for txt in (question, answer):
for f in mediaFiles(txt, remote=True):
@@ -267,7 +267,7 @@ def downloadRemote(deck):
failed.append(link)
deck.updateProgress(label=_("Download %d...") % c)
for (url, name) in passed:
deck.s.statement(
deck.db.statement(
"update fields set value = replace(value, :url, :name)",
url=url, name=name)
deck.updateProgress(label=_("Updating references..."))

@@ -239,8 +239,8 @@ class DeckStats(object):
if not test:
test = "lastInterval > 21"
head = "select count() from revlog where %s"
all = self.deck.s.scalar(head % test)
yes = self.deck.s.scalar((head % test) + " and ease > 1")
all = self.deck.db.scalar(head % test)
yes = self.deck.db.scalar((head % test) + " and ease > 1")
return (all, yes, yes/float(all)*100)
def getYoungCorrect(self):
@@ -253,7 +253,7 @@ class DeckStats(object):
today = self.deck.failedCutoff
x = today + 86400*start
y = today + 86400*finish
return self.deck.s.scalar("""
return self.deck.db.scalar("""
select count(distinct(cast((time-:off)/86400 as integer))) from revlog
where time >= :x and time <= :y""",x=x,y=y, off=self.deck.utcOffset)
@@ -261,12 +261,12 @@ where time >= :x and time <= :y""",x=x,y=y, off=self.deck.utcOffset)
now = datetime.datetime.today()
x = time.mktime((now + datetime.timedelta(start)).timetuple())
y = time.mktime((now + datetime.timedelta(finish)).timetuple())
return self.deck.s.scalar(
return self.deck.db.scalar(
"select count() from revlog where time >= :x and time <= :y",
x=x, y=y)
def getAverageInterval(self):
return self.deck.s.scalar(
return self.deck.db.scalar(
"select sum(interval) / count(interval) from cards "
"where cards.reps > 0") or 0
@@ -305,32 +305,32 @@ where time >= :x and time <= :y""",x=x,y=y, off=self.deck.utcOffset)
return (time.time() - self.deck.created) / 86400.0
def getSumInverseRoundInterval(self):
return self.deck.s.scalar(
return self.deck.db.scalar(
"select sum(1/round(max(interval, 1)+0.5)) from cards "
"where cards.reps > 0 "
"and type >= 0") or 0
def getWorkloadPeriod(self, period):
cutoff = time.time() + 86400 * period
return (self.deck.s.scalar("""
return (self.deck.db.scalar("""
select count(id) from cards
where combinedDue < :cutoff
and type >= 0 and relativeDelay in (0,1)""", cutoff=cutoff) or 0) / float(period)
def getPastWorkloadPeriod(self, period):
cutoff = time.time() - 86400 * period
return (self.deck.s.scalar("""
return (self.deck.db.scalar("""
select count(*) from revlog
where time > :cutoff""", cutoff=cutoff) or 0) / float(period)
def getNewPeriod(self, period):
cutoff = time.time() - 86400 * period
return (self.deck.s.scalar("""
return (self.deck.db.scalar("""
select count(id) from cards
where created > :cutoff""", cutoff=cutoff) or 0)
def getFirstPeriod(self, period):
cutoff = time.time() - 86400 * period
return (self.deck.s.scalar("""
return (self.deck.db.scalar("""
select count(*) from revlog
where rep = 1 and time > :cutoff""", cutoff=cutoff) or 0)

@@ -175,14 +175,14 @@ class SyncTools(object):
"Facts missing after sync. Please run Tools>Advanced>Check DB.")
def missingFacts(self):
return self.deck.s.scalar(
return self.deck.db.scalar(
"select count() from cards where factId "+
"not in (select id from facts)");
def postSyncRefresh(self):
"Flush changes to DB, and reload object associations."
self.deck.s.flush()
self.deck.s.refresh(self.deck)
self.deck.db.flush()
self.deck.db.refresh(self.deck)
self.deck.currentModel
# Summaries
@@ -225,7 +225,7 @@ class SyncTools(object):
for (key, sql) in cats:
if self.fullThreshold:
sql += " limit %d" % self.fullThreshold
ret = self.deck.s.all(sql, m=lastSync)
ret = self.deck.db.all(sql, m=lastSync)
if self.fullThreshold and self.fullThreshold == len(ret):
# threshold exceeded, abort early
return None
@@ -303,7 +303,7 @@ class SyncTools(object):
def bundleModel(self, id, updateModified):
"Return a model representation suitable for transport."
mod = self.deck.s.query(Model).get(id)
mod = self.deck.db.query(Model).get(id)
# force load of lazy attributes
mod.fieldModels; mod.cardModels
m = self.dictFromObj(mod)
@@ -332,7 +332,7 @@ class SyncTools(object):
self.applyDict(local, model)
self.mergeFieldModels(local, fms)
self.mergeCardModels(local, cms)
self.deck.s.statement(
self.deck.db.statement(
"delete from modelsDeleted where modelId in %s" %
ids2str([m['id'] for m in models]))
@@ -404,10 +404,10 @@ class SyncTools(object):
modified = "modified"
factIds = ids2str(ids)
return {
'facts': self.realLists(self.deck.s.all("""
'facts': self.realLists(self.deck.db.all("""
select id, modelId, created, %s, tags, spaceUntil, lastCardId from facts
where id in %s""" % (modified, factIds))),
'fields': self.realLists(self.deck.s.all("""
'fields': self.realLists(self.deck.db.all("""
select id, factId, fieldModelId, ordinal, value, chksum from fields
where factId in %s""" % factIds))
}
@@ -427,7 +427,7 @@ where factId in %s""" % factIds))
'spaceUntil': f[5] or "",
'lastCardId': f[6]
} for f in facts]
self.deck.s.execute("""
self.deck.db.execute("""
insert or replace into facts
(id, modelId, created, modified, tags, spaceUntil, lastCardId)
values
@@ -446,16 +446,16 @@ values
'chksum': f[5]
} for f in fields]
# delete local fields since ids may have changed
self.deck.s.execute(
self.deck.db.execute(
"delete from fields where factId in %s" %
ids2str([f[0] for f in facts]))
# then update
self.deck.s.execute("""
self.deck.db.execute("""
insert into fields
(id, factId, fieldModelId, ordinal, value, chksum)
values
(:id, :factId, :fieldModelId, :ordinal, :value, :chksum)""", dlist)
self.deck.s.statement(
self.deck.db.statement(
"delete from factsDeleted where factId in %s" %
ids2str([f[0] for f in facts]))
@@ -466,7 +466,7 @@ values
##########################################################################
def getCards(self, ids):
return self.realLists(self.deck.s.all("""
return self.realLists(self.deck.db.all("""
select id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
firstAnswered, reps, successive, averageTime, reviewTime, youngEase0,
@@ -516,7 +516,7 @@ from cards where id in %s""" % ids2str(ids)))
'combinedDue': c[35],
'rd': c[36],
} for c in cards]
self.deck.s.execute("""
self.deck.db.execute("""
insert or replace into cards
(id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
@@ -533,7 +533,7 @@ values
:matureEase1, :matureEase2, :matureEase3, :matureEase4, :yesCount,
:noCount, :question, :answer, :lastFactor, :spaceUntil,
:type, :combinedDue, :rd, 0)""", dlist)
self.deck.s.statement(
self.deck.db.statement(
"delete from cardsDeleted where cardId in %s" %
ids2str([c[0] for c in cards]))
@@ -551,39 +551,30 @@ values
# and ensure lastSync is greater than modified
self.deck.lastSync = max(time.time(), self.deck.modified+1)
d = self.dictFromObj(self.deck)
del d['Session']
del d['engine']
del d['s']
del d['path']
del d['syncName']
del d['version']
if 'newQueue' in d:
del d['newQueue']
del d['failedQueue']
del d['revQueue']
# these may be deleted before bundling
if 'css' in d: del d['css']
if 'models' in d: del d['models']
if 'currentModel' in d: del d['currentModel']
for bad in ("Session", "engine", "s", "db", "path", "syncName",
"version", "newQueue", "failedQueue", "revQueue",
"css", "models", "currentModel"):
if bad in d:
del d[bad]
keys = d.keys()
for k in keys:
if isinstance(d[k], types.MethodType):
del d[k]
d['meta'] = self.realLists(self.deck.s.all("select * from deckVars"))
d['meta'] = self.realLists(self.deck.db.all("select * from deckVars"))
return d
def updateDeck(self, deck):
if 'meta' in deck:
meta = deck['meta']
for (k,v) in meta:
self.deck.s.statement("""
self.deck.db.statement("""
insert or replace into deckVars
(key, value) values (:k, :v)""", k=k, v=v)
del deck['meta']
self.applyDict(self.deck, deck)
def bundleHistory(self):
return self.realLists(self.deck.s.all("""
return self.realLists(self.deck.db.all("""
select * from revlog where time > :ls""",
ls=self.deck.lastSync))
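
The bundleDeck hunk above also collapses a chain of individual del statements into one loop over an exclusion tuple, adding the new db attribute next to the legacy s so neither handle is bundled into the sync payload. Restated as a standalone helper (the helper name and form are assumptions; the attribute list is taken verbatim from the new code):

def stripUnsyncedAttrs(d):
    # attributes that must never travel in a sync payload: DB handles,
    # scheduling queues, and cached model references
    for bad in ("Session", "engine", "s", "db", "path", "syncName",
                "version", "newQueue", "failedQueue", "revQueue",
                "css", "models", "currentModel"):
        if bad in d:
            del d[bad]
    return d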
@@ -599,18 +590,18 @@ select * from revlog where time > :ls""",
'flags': h[8]} for h in history]
if not dlist:
return
self.deck.s.statements("""
self.deck.db.statements("""
insert or ignore into revlog values
(:time, :cardId, :ease, :rep, :lastInterval, :interval, :factor,
:userTime, :flags)""",
dlist)
def bundleSources(self):
return self.realLists(self.deck.s.all("select * from sources"))
return self.realLists(self.deck.db.all("select * from sources"))
def updateSources(self, sources):
for s in sources:
self.deck.s.statement("""
self.deck.db.statement("""
insert or replace into sources values
(:id, :name, :created, :lastSync, :syncPeriod)""",
id=s[0],
@@ -623,7 +614,7 @@ insert or replace into sources values
##########################################################################
def getMedia(self, ids):
return [tuple(row) for row in self.deck.s.all("""
return [tuple(row) for row in self.deck.db.all("""
select id, filename, size, created, originalPath, description
from media where id in %s""" % ids2str(ids))]
@@ -640,24 +631,24 @@ from media where id in %s""" % ids2str(ids))]
'description': m[5]})
# apply metadata
if meta:
self.deck.s.statements("""
self.deck.db.statements("""
insert or replace into media (id, filename, size, created,
originalPath, description)
values (:id, :filename, :size, :created, :originalPath,
:description)""", meta)
self.deck.s.statement(
self.deck.db.statement(
"delete from mediaDeleted where mediaId in %s" %
ids2str([m[0] for m in media]))
def deleteMedia(self, ids):
sids = ids2str(ids)
files = self.deck.s.column0(
files = self.deck.db.column0(
"select filename from media where id in %s" % sids)
self.deck.s.statement("""
self.deck.db.statement("""
insert into mediaDeleted
select id, :now from media
where media.id in %s""" % sids, now=time.time())
self.deck.s.execute(
self.deck.db.execute(
"delete from media where id in %s" % sids)
# One-way syncing (sharing)
@@ -824,7 +815,7 @@ and cards.id in %s""" % ids2str([c[0] for c in cards])))
for l in sum.values():
if len(l) > 1000:
return True
if self.deck.s.scalar(
if self.deck.db.scalar(
"select count() from revlog where time > :ls",
ls=self.deck.lastSync) > 1000:
return True
@@ -835,7 +826,7 @@ and cards.id in %s""" % ids2str([c[0] for c in cards])))
t = time.time()
# ensure modified is not greater than server time
self.deck.modified = min(self.deck.modified, self.server.timestamp)
self.deck.s.commit()
self.deck.db.commit()
self.deck.close()
fields = {
"p": self.server.password,

@@ -23,56 +23,56 @@ def upgradeSchema(s):
def updateIndices(deck):
"Add indices to the DB."
# counts, failed cards
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_cards_typeCombined on cards
(type, combinedDue, factId)""")
# scheduler-agnostic type
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_cards_relativeDelay on cards
(relativeDelay)""")
# index on modified, to speed up sync summaries
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_cards_modified on cards
(modified)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_facts_modified on facts
(modified)""")
# card spacing
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_cards_factId on cards (factId)""")
# fields
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_fields_factId on fields (factId)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_fields_fieldModelId on fields (fieldModelId)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_fields_chksum on fields (chksum)""")
# media
deck.s.statement("""
deck.db.statement("""
create unique index if not exists ix_media_filename on media (filename)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_media_originalPath on media (originalPath)""")
# deletion tracking
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_cardsDeleted_cardId on cardsDeleted (cardId)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_modelsDeleted_modelId on modelsDeleted (modelId)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_factsDeleted_factId on factsDeleted (factId)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_mediaDeleted_factId on mediaDeleted (mediaId)""")
# tags
txt = "create unique index if not exists ix_tags_tag on tags (tag)"
try:
deck.s.statement(txt)
deck.db.statement(txt)
except:
deck.s.statement("""
deck.db.statement("""
delete from tags where exists (select 1 from tags t2 where tags.tag = t2.tag
and tags.rowid > t2.rowid)""")
deck.s.statement(txt)
deck.s.statement("""
deck.db.statement(txt)
deck.db.statement("""
create index if not exists ix_cardTags_tagCard on cardTags (tagId, cardId)""")
deck.s.statement("""
deck.db.statement("""
create index if not exists ix_cardTags_cardId on cardTags (cardId)""")
def upgradeDeck(deck):
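
One detail in the tags index above is worth spelling out: if the unique index cannot be created because duplicate tags already exist, the duplicates are deleted (keeping the row with the lowest rowid) and the creation is retried. The same pattern in isolation, assuming any handle with an execute method; the SQL statements are verbatim from the hunk:

def createUniqueTagIndex(db):
    txt = "create unique index if not exists ix_tags_tag on tags (tag)"
    try:
        db.execute(txt)
    except Exception:
        # duplicate rows block the unique index: keep the oldest copy of
        # each tag and drop the rest, then retry
        db.execute("""
delete from tags where exists (select 1 from tags t2 where tags.tag = t2.tag
and tags.rowid > t2.rowid)""")
        db.execute(txt)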
@@ -92,13 +92,13 @@ def upgradeDeck(deck):
raise Exception("oldDeckVersion")
if deck.version < 44:
# leaner indices
deck.s.statement("drop index if exists ix_cards_factId")
deck.db.statement("drop index if exists ix_cards_factId")
deck.version = 44
deck.s.commit()
deck.db.commit()
if deck.version < 48:
deck.updateFieldCache(deck.s.column0("select id from facts"))
deck.updateFieldCache(deck.db.column0("select id from facts"))
deck.version = 48
deck.s.commit()
deck.db.commit()
if deck.version < 52:
dname = deck.name()
sname = deck.syncName
@@ -121,20 +121,20 @@ this message. (ERR-0101)""") % {
elif sname:
deck.enableSyncing()
deck.version = 52
deck.s.commit()
deck.db.commit()
if deck.version < 53:
if deck.getBool("perDay"):
if deck.hardIntervalMin == 0.333:
deck.hardIntervalMin = max(1.0, deck.hardIntervalMin)
deck.hardIntervalMax = max(1.1, deck.hardIntervalMax)
deck.version = 53
deck.s.commit()
deck.db.commit()
if deck.version < 54:
# broken versions of the DB orm die if this is a bool with a
# non-int value
deck.s.statement("update fieldModels set editFontFamily = 1");
deck.db.statement("update fieldModels set editFontFamily = 1");
deck.version = 54
deck.s.commit()
deck.db.commit()
if deck.version < 61:
# do our best to upgrade templates to the new style
txt = '''\
@@ -175,87 +175,87 @@ this message. (ERR-0101)""") % {
# rebuild the media db based on new format
rebuildMediaDir(deck, dirty=False)
deck.version = 61
deck.s.commit()
deck.db.commit()
if deck.version < 62:
# updated indices
deck.s.statement("drop index if exists ix_cards_typeCombined")
deck.db.statement("drop index if exists ix_cards_typeCombined")
updateIndices(deck)
deck.version = 62
deck.s.commit()
deck.db.commit()
if deck.version < 64:
# remove old static indices, as all clients should be libanki1.2+
for d in ("ix_cards_duePriority",
"ix_cards_priorityDue"):
deck.s.statement("drop index if exists %s" % d)
deck.db.statement("drop index if exists %s" % d)
deck.version = 64
deck.s.commit()
deck.db.commit()
# note: we keep the priority index for now
if deck.version < 65:
# we weren't correctly setting relativeDelay when answering cards
# in previous versions, so ensure everything is set correctly
deck.rebuildTypes()
deck.version = 65
deck.s.commit()
deck.db.commit()
# skip a few to allow for updates to stable tree
if deck.version < 70:
# update dynamic indices given we don't use priority anymore
for d in ("intervalDesc", "intervalAsc", "randomOrder",
"dueAsc", "dueDesc"):
deck.s.statement("drop index if exists ix_cards_%s2" % d)
deck.s.statement("drop index if exists ix_cards_%s" % d)
deck.db.statement("drop index if exists ix_cards_%s2" % d)
deck.db.statement("drop index if exists ix_cards_%s" % d)
deck.updateDynamicIndices()
# remove old views
for v in ("failedCards", "revCardsOld", "revCardsNew",
"revCardsDue", "revCardsRandom", "acqCardsRandom",
"acqCardsOld", "acqCardsNew"):
deck.s.statement("drop view if exists %s" % v)
deck.db.statement("drop view if exists %s" % v)
deck.version = 70
deck.s.commit()
deck.db.commit()
if deck.version < 71:
# remove the expensive value cache
deck.s.statement("drop index if exists ix_fields_value")
deck.db.statement("drop index if exists ix_fields_value")
# add checksums and index
deck.updateAllFieldChecksums()
updateIndices(deck)
deck.s.execute("vacuum")
deck.s.execute("analyze")
deck.db.execute("vacuum")
deck.db.execute("analyze")
deck.version = 71
deck.s.commit()
deck.db.commit()
if deck.version < 72:
# this was only used for calculating average factor
deck.s.statement("drop index if exists ix_cards_factor")
deck.db.statement("drop index if exists ix_cards_factor")
deck.version = 72
deck.s.commit()
deck.db.commit()
if deck.version < 73:
# remove stats, as it's all in the revlog now
deck.s.statement("drop index if exists ix_stats_typeDay")
deck.s.statement("drop table if exists stats")
deck.db.statement("drop index if exists ix_stats_typeDay")
deck.db.statement("drop table if exists stats")
deck.version = 73
deck.s.commit()
deck.db.commit()
if deck.version < 74:
# migrate revlog data to new table
deck.s.statement("""
deck.db.statement("""
insert into revlog select
time, cardId, ease, reps, lastInterval, nextInterval, nextFactor,
min(thinkingTime, 60), 0 from reviewHistory""")
deck.s.statement("drop table reviewHistory")
deck.db.statement("drop table reviewHistory")
# convert old ease0 into ease1
deck.s.statement("update revlog set ease = 1 where ease = 0")
deck.db.statement("update revlog set ease = 1 where ease = 0")
# remove priority index
deck.s.statement("drop index if exists ix_cards_priority")
deck.db.statement("drop index if exists ix_cards_priority")
deck.version = 74
deck.s.commit()
deck.db.commit()
# executing a pragma here is very slow on large decks, so we store
# our own record
if not deck.getInt("pageSize") == 4096:
deck.s.commit()
deck.s.execute("pragma page_size = 4096")
deck.s.execute("pragma legacy_file_format = 0")
deck.s.execute("vacuum")
deck.db.commit()
deck.db.execute("pragma page_size = 4096")
deck.db.execute("pragma legacy_file_format = 0")
deck.db.execute("vacuum")
deck.setVar("pageSize", 4096, mod=False)
deck.s.commit()
deck.db.commit()
if prog:
assert deck.modified == oldmod
deck.finishProgress()
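
The pageSize block at the end of this upgrade works around "pragma page_size" being very slow to check on large decks: the last applied value is cached in deckVars via setVar and read back with getInt before touching the pragma. A minimal restatement of that pattern, using only calls visible in the hunk (the wrapper name is hypothetical):

def ensurePageSize(deck):
    # cheap cached check instead of querying the pragma on every open
    if deck.getInt("pageSize") == 4096:
        return
    deck.db.commit()
    deck.db.execute("pragma page_size = 4096")
    deck.db.execute("pragma legacy_file_format = 0")
    deck.db.execute("vacuum")  # vacuum rebuilds the file at the new page size
    deck.setVar("pageSize", 4096, mod=False)
    deck.db.commit()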

@@ -78,7 +78,7 @@ def test_saveAs():
newDeck = deck.saveAs(path)
assert newDeck.cardCount == 1
# delete card
id = newDeck.s.scalar("select id from cards")
id = newDeck.db.scalar("select id from cards")
newDeck.deleteCard(id)
# save into new deck
newDeck2 = newDeck.saveAs(path2)
@@ -93,7 +93,7 @@ def test_factAddDelete():
deck = DeckStorage.Deck()
deck.addModel(BasicModel())
# set rollback point
deck.s.commit()
deck.db.commit()
f = deck.newFact()
# empty fields
try:
@@ -135,33 +135,33 @@ def test_fieldChecksum():
f = deck.newFact()
f['Front'] = u"new"; f['Back'] = u"new2"
deck.addFact(f)
(id, sum) = deck.s.first(
(id, sum) = deck.db.first(
"select id, chksum from fields where value = 'new'")
assert sum == "22af645d"
# empty field should have no checksum
f['Front'] = u""
deck.s.flush()
assert deck.s.scalar(
deck.db.flush()
assert deck.db.scalar(
"select chksum from fields where id = :id", id=id) == ""
# changing the value should change the checksum
f['Front'] = u"newx"
deck.s.flush()
assert deck.s.scalar(
deck.db.flush()
assert deck.db.scalar(
"select chksum from fields where id = :id", id=id) == "4b0e5a4c"
# back should have no checksum, because it's not set to be unique
(id, sum) = deck.s.first(
(id, sum) = deck.db.first(
"select id, chksum from fields where value = 'new2'")
assert sum == ""
# if we turn on unique, it should get a checksum
fm = f.model.fieldModels[1]
fm.unique = True
deck.updateFieldChecksums(fm.id)
assert deck.s.scalar(
assert deck.db.scalar(
"select chksum from fields where id = :id", id=id) == "82f2ec5f"
# and turning it off should zero the checksum again
fm.unique = False
deck.updateFieldChecksums(fm.id)
assert deck.s.scalar(
assert deck.db.scalar(
"select chksum from fields where id = :id", id=id) == ""
def test_modelAddDelete():
@@ -176,7 +176,7 @@ def test_modelAddDelete():
deck.deleteModel(deck.currentModel)
deck.reset()
assert deck.cardCount == 0
deck.s.refresh(deck)
deck.db.refresh(deck)
def test_modelCopy():
deck = DeckStorage.Deck()
@@ -263,7 +263,7 @@ def test_modelChange():
assert deck.modelUseCount(m2) == 1
assert deck.cardCount == 3
assert deck.factCount == 2
(q, a) = deck.s.first("""
(q, a) = deck.db.first("""
select question, answer from cards where factId = :id""",
id=f.id)
assert stripHTML(q) == u"e"

@@ -23,7 +23,7 @@ def test_csv():
# four problems - missing front, dupe front, wrong num of fields
assert len(i.log) == 4
assert i.total == 5
deck.s.close()
deck.close()
def test_csv_tags():
deck = DeckStorage.Deck()
@@ -31,10 +31,10 @@ def test_csv_tags():
file = unicode(os.path.join(testDir, "importing/text-tags.txt"))
i = csvfile.TextImporter(deck, file)
i.doImport()
facts = deck.s.query(Fact).all()
facts = deck.db.query(Fact).all()
assert len(facts) == 2
assert facts[0].tags == "baz qux" or facts[1].tags == "baz qux"
deck.s.close()
deck.close()
def test_mnemosyne10():
deck = DeckStorage.Deck()
@@ -43,7 +43,7 @@ def test_mnemosyne10():
i = mnemosyne10.Mnemosyne10Importer(deck, file)
i.doImport()
assert i.total == 5
deck.s.close()
deck.close()
def test_supermemo_xml_01_unicode():
deck = DeckStorage.Deck()
@@ -54,7 +54,7 @@ def test_supermemo_xml_01_unicode():
i.doImport()
# only returning top-level elements?
assert i.total == 1
deck.s.close()
deck.close()
def test_anki10():
# though these are not modified, sqlite updates the mtime, so copy to tmp
@@ -69,7 +69,7 @@ def test_anki10():
i = anki10.Anki10Importer(deck, file)
i.doImport()
assert i.total == 2
deck.s.rollback()
deck.db.rollback()
deck.close()
# import a deck into itself - 10-2 is the same as test10, but with one
# card answered and another deleted. nothing should be synced to client
@@ -77,7 +77,7 @@ def test_anki10():
i = anki10.Anki10Importer(deck, file2)
i.doImport()
assert i.total == 0
deck.s.rollback()
deck.db.rollback()
def test_anki10_modtime():
deck1 = DeckStorage.Deck()
@@ -101,9 +101,9 @@ def test_anki10_modtime():
i.doImport()
client.sync()
assert i.total == 1
assert deck2.s.scalar("select count(*) from cards") == 2
assert deck2.s.scalar("select count(*) from facts") == 2
assert deck2.s.scalar("select count(*) from models") == 2
assert deck2.db.scalar("select count(*) from cards") == 2
assert deck2.db.scalar("select count(*) from facts") == 2
assert deck2.db.scalar("select count(*) from models") == 2
def test_dingsbums():
deck = DeckStorage.Deck()
@@ -113,7 +113,7 @@ def test_dingsbums():
i = dingsbums.DingsBumsImporter(deck, file)
i.doImport()
assert 7 == i.total
deck.s.close()
deck.close()
def test_updating():
# get the standard csv deck first
@@ -129,11 +129,11 @@ def test_updating():
i.updateKey = (0, deck.currentModel.fieldModels[0].id)
i.multipleCardsAllowed = False
i.doImport()
ans = deck.s.scalar(
ans = deck.db.scalar(
u"select answer from cards where question like '%食べる%'")
assert "to ate" in ans
# try again with tags
i.updateKey = (0, deck.currentModel.fieldModels[0].id)
i.mapping[1] = 0
i.doImport()
deck.s.close()
deck.close()

@@ -33,7 +33,7 @@ def test_copy():
# new file
assert m.copyToMedia(deck, path) == "foo.jpg"
# dupe md5
deck.s.statement("""
deck.db.statement("""
insert into media values (null, 'foo.jpg', 0, 0, :sum, '')""",
sum=checksum("hello"))
path = os.path.join(dir, "bar.jpg")
@@ -53,54 +53,54 @@ def test_db():
f['Back'] = u"back [sound:foo.jpg]"
deck.addFact(f)
# 1 entry in the media db, with two references, and missing file
assert deck.s.scalar("select count() from media") == 1
assert deck.s.scalar("select size from media") == 2
assert deck.s.scalar("select not originalPath from media")
assert deck.db.scalar("select count() from media") == 1
assert deck.db.scalar("select size from media") == 2
assert deck.db.scalar("select not originalPath from media")
# copy to media folder & check db
path = m.copyToMedia(deck, path)
m.rebuildMediaDir(deck)
# md5 should be set now
assert deck.s.scalar("select count() from media") == 1
assert deck.s.scalar("select size from media") == 2
assert deck.s.scalar("select originalPath from media")
assert deck.db.scalar("select count() from media") == 1
assert deck.db.scalar("select size from media") == 2
assert deck.db.scalar("select originalPath from media")
# edit the fact to remove a reference
f['Back'] = u""
f.setModified(True, deck)
deck.s.flush()
assert deck.s.scalar("select count() from media") == 1
assert deck.s.scalar("select size from media") == 1
deck.db.flush()
assert deck.db.scalar("select count() from media") == 1
assert deck.db.scalar("select size from media") == 1
# remove the front reference too
f['Front'] = u""
f.setModified(True, deck)
assert deck.s.scalar("select size from media") == 0
assert deck.db.scalar("select size from media") == 0
# add the reference back
f['Front'] = u"<img src='foo.jpg'>"
f.setModified(True, deck)
assert deck.s.scalar("select size from media") == 1
assert deck.db.scalar("select size from media") == 1
# detect file modifications
oldsum = deck.s.scalar("select originalPath from media")
oldsum = deck.db.scalar("select originalPath from media")
open(path, "w").write("world")
m.rebuildMediaDir(deck)
newsum = deck.s.scalar("select originalPath from media")
newsum = deck.db.scalar("select originalPath from media")
assert newsum and newsum != oldsum
# delete underlying file and check db
os.unlink(path)
m.rebuildMediaDir(deck)
# md5 should be gone again
assert deck.s.scalar("select count() from media") == 1
assert deck.s.scalar("select not originalPath from media")
assert deck.db.scalar("select count() from media") == 1
assert deck.db.scalar("select not originalPath from media")
# media db should pick up media defined via templates & bulk update
f['Back'] = u"bar.jpg"
f.setModified(True, deck)
deck.s.flush()
deck.db.flush()
# modify template & regenerate
assert deck.s.scalar("select count() from media") == 1
assert deck.s.scalar("select sum(size) from media") == 1
assert deck.db.scalar("select count() from media") == 1
assert deck.db.scalar("select sum(size) from media") == 1
deck.currentModel.cardModels[0].aformat=u'<img src="{{{Back}}}">'
deck.updateCardsFromModel(deck.currentModel)
assert deck.s.scalar("select sum(size) from media") == 2
assert deck.s.scalar("select count() from media") == 2
assert deck.db.scalar("select sum(size) from media") == 2
assert deck.db.scalar("select count() from media") == 2
deck.currentModel.cardModels[0].aformat=u'{{{Back}}}'
deck.updateCardsFromModel(deck.currentModel)
assert deck.s.scalar("select count() from media") == 2
assert deck.s.scalar("select sum(size) from media") == 1
assert deck.db.scalar("select count() from media") == 2
assert deck.db.scalar("select sum(size) from media") == 1

@@ -103,7 +103,7 @@ def test_localsync_deck():
c = deck1.getCard()
deck1.answerCard(c, 4)
client.sync()
assert deck2.s.scalar("select count(*) from revlog") == 1
assert deck2.db.scalar("select count(*) from revlog") == 1
# make sure meta data is synced
deck1.setVar("foo", 1)
assert deck1.getInt("foo") == 1
@@ -164,15 +164,15 @@ def test_localsync_factsandcards():
assert deck1.factCount == 2 and deck1.cardCount == 4
assert deck2.factCount == 2 and deck2.cardCount == 4
# ensure the fact was copied across
f1 = deck1.s.query(Fact).first()
f2 = deck1.s.query(Fact).get(f1.id)
f1 = deck1.db.query(Fact).first()
f2 = deck1.db.query(Fact).get(f1.id)
f1['Front'] = u"myfront"
f1.setModified()
deck1.setModified()
client.sync()
deck1.rebuildCounts()
deck2.rebuildCounts()
f2 = deck1.s.query(Fact).get(f1.id)
f2 = deck1.db.query(Fact).get(f1.id)
assert f2['Front'] == u"myfront"
c1 = deck1.getCard()
c2 = deck2.getCard()
@@ -226,8 +226,8 @@ def test_localsync_media():
assert len(os.listdir(deck2media)) == 1
client.sync()
# metadata should have been copied
assert deck1.s.scalar("select count(1) from media") == 3
assert deck2.s.scalar("select count(1) from media") == 3
assert deck1.db.scalar("select count(1) from media") == 3
assert deck2.db.scalar("select count(1) from media") == 3
# copy local files
copyLocalMedia(deck1, deck2)
assert len(os.listdir(deck1media)) == 2
@@ -239,8 +239,8 @@ def test_localsync_media():
os.unlink(os.path.join(deck1media, "22161b29b0c18e068038021f54eee1ee.png"))
rebuildMediaDir(deck1)
client.sync()
assert deck1.s.scalar("select count(1) from media") == 3
assert deck2.s.scalar("select count(1) from media") == 3
assert deck1.db.scalar("select count(1) from media") == 3
assert deck2.db.scalar("select count(1) from media") == 3
# Remote tests
##########################################################################