mirror of https://github.com/ankitects/anki.git (synced 2025-09-20 23:12:21 -04:00)

SQLAlchemy is a great tool, but it wasn't a great fit for Anki:

- We often had to drop down to raw SQL for performance reasons.
- The DB cursors and results were wrapped, which incurred a sizable performance hit due to introspection. Operations like fetching 50k records from a hot cache were taking more than twice as long to complete.
- We take advantage of sqlite-specific features, so SQL language abstraction is useless to us.
- The anki schema is quite small, so manually saving and loading objects is not a big burden.

In the process of porting to DBAPI, I've refactored the database schema:

- App configuration data that we don't need in joins or bulk updates has been moved into JSON objects. This simplifies serializing, and means we won't need DB schema changes to store extra options in the future. This change obsoletes the deckVars table.
- Renamed tables:
  - fieldModels -> fields
  - cardModels -> templates
  - fields -> fdata
- A number of attribute names have been shortened.

Classes like Card, Fact & Model remain. They maintain a reference to the deck. To write their state to the DB, call .flush().

Objects no longer have their modification time manually updated. Instead, the modification time is updated when they are flushed. This also applies to the deck.

Decks will now save on close, because various operations that were done at deck load will be moved into deck close instead. Operations like undoing buried cards are cheap on a hot cache, but expensive on startup. Programmatically you can call .close(save=False) to avoid a save and a modification bump. This will be useful for generating due counts.

Because of the new saving behaviour, the save and save as options will be removed from the GUI in the future.

The q/a cache and field cache generation has been centralized. Facts will automatically rebuild the cache on flush; models can do so with model.updateCache().

Media handling has also been reworked. It has moved into a MediaRegistry object, which the deck holds. Refcounting has been dropped - it meant we had to compare old and new values every time facts or models were changed, and existed for the sole purpose of not showing errors on a missing media download. Instead we just call media.registerText(q+a) when it's updated. The download function will be expanded to ask the user if they want to continue after a certain number of files have failed to download, which should be an adequate alternative. And we now add the file into the media DB when it's copied to the media directory, not when the card is committed. This fixes the duplicates a user would get if they added the same media to a card twice without adding the card.

The old DeckStorage object had its upgrade code split in a previous commit; the opening and upgrading code has been merged back together, and put in a separate storage.py file. The correct way to open a deck now is: import anki; d = anki.Deck(path).

- deck.getCard() -> deck.sched.getCard(); same with answerCard.
- deck.getCard(id) returns a Card object now.

And the DB wrapper has had a few changes:

- SQL statements use a more standard DBAPI:
  - statement() -> execute()
  - statements() -> executemany()
  - called like execute(sql, 1, 2, 3) or execute(sql, a=1, b=2, c=3)
- column0 -> list
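A minimal sketch of the new usage described above. Only the names taken from this message (anki.Deck, deck.sched.getCard/answerCard, .flush(), .close(save=False), db.execute/executemany/list) are from the source; the SQL, column values and the ease argument are illustrative assumptions, not the definitive API.

# sketch only; deck path, SQL and ease value below are assumptions
import anki

deck = anki.Deck("/path/to/deck.anki")      # opening/upgrading now lives in storage.py

# scheduling moved onto deck.sched
card = deck.sched.getCard()
if card:
    deck.sched.answerCard(card, 3)          # ease argument assumed for illustration

# objects are written explicitly; modification time is set on flush
card.flush()

# the DB wrapper is plain DBAPI-style
due = deck.db.list("select id from cards where due < ?", 10)        # column0 -> list
deck.db.execute("update cards set priority = ? where id = ?", 1, 4567)
deck.db.executemany("update cards set priority = :p where id = :id",
                    [{'p': 1, 'id': 123}, {'p': 2, 'id': 456}])

# decks save on close; skip the save and modification bump when only reading
deck.close(save=False)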
1108 lines | 39 KiB | Python
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html

import zlib, re, urllib, urllib2, socket, simplejson, time, shutil
import os, base64, httplib, sys, tempfile, types
from datetime import date
# needed by the full sync helpers (fullSyncFromLocal/fullSyncFromServer) below
from sqlite3 import dbapi2 as sqlite
import anki, anki.deck, anki.cards
from anki.errors import *
#from anki.models import Model, Field, Template
#from anki.facts import Fact
#from anki.cards import Card
from anki.utils import ids2str, hexifyID, checksum
#from anki.media import mediaFiles
from anki.lang import _
from hooks import runHook

if simplejson.__version__ < "1.7.3":
    raise Exception("SimpleJSON must be 1.7.3 or later.")

CHUNK_SIZE = 32768
MIME_BOUNDARY = "Anki-sync-boundary"
SYNC_HOST = os.environ.get("SYNC_HOST") or "dev.ankiweb.net"
SYNC_PORT = int(os.environ.get("SYNC_PORT") or 80)
SYNC_URL = "http://%s:%d/sync/" % (SYNC_HOST, SYNC_PORT)
KEYS = ("models", "facts", "cards", "media")

# - need to add tags table syncing

##########################################################################
# Monkey-patch httplib to incrementally send instead of chewing up large
# amounts of memory, and track progress.

sendProgressHook = None

def incrementalSend(self, strOrFile):
    if self.sock is None:
        if self.auto_open:
            self.connect()
        else:
            raise httplib.NotConnected()
    if self.debuglevel > 0:
        print "send:", repr(strOrFile)
    try:
        if (isinstance(strOrFile, str) or
            isinstance(strOrFile, unicode)):
            self.sock.sendall(strOrFile)
        else:
            cnt = 0
            t = time.time()
            while 1:
                if sendProgressHook and time.time() - t > 1:
                    sendProgressHook(cnt)
                    t = time.time()
                data = strOrFile.read(CHUNK_SIZE)
                cnt += len(data)
                if not data:
                    break
                self.sock.sendall(data)
    except socket.error, v:
        if v[0] == 32: # Broken pipe
            self.close()
        raise

httplib.HTTPConnection.send = incrementalSend

def fullSyncProgressHook(cnt):
    runHook("fullSyncProgress", "fromLocal", cnt)

##########################################################################

class SyncTools(object):

    def __init__(self, deck=None):
        self.deck = deck
        self.diffs = {}
        self.serverExcludedTags = []
        self.timediff = 0
        self.fullThreshold = 2000

    # Control
    ##########################################################################

    def setServer(self, server):
        self.server = server

    def sync(self):
        "Sync two decks locally. Reimplement this for finer control."
        if not self.prepareSync(0):
            return
        rsum = None
        lsum = self.summary(self.deck.lastSync)
        if lsum:
            rsum = self.server.summary(self.deck.lastSync)
        if not lsum or not rsum:
            raise Exception("full sync required")
        payload = self.genPayload((lsum, rsum))
        res = self.server.applyPayload(payload)
        self.applyPayloadReply(res)
        self.deck.reset()

    def prepareSync(self, timediff):
        "Sync setup. True if sync needed."
        self.localTime = self.modified()
        self.remoteTime = self.server.modified()
        if self.localTime == self.remoteTime:
            return False
        l = self._lastSync(); r = self.server._lastSync()
        # set lastSync to the lower of the two sides, account for slow clocks,
        # and assume it took up to 10 seconds for the reply to arrive
        self.deck.lastSync = max(0, min(l, r) - timediff - 10)
        return True

    def genPayload(self, summaries):
        (lsum, rsum) = summaries
        payload = {}
        # first, handle models, facts and cards
        for key in KEYS:
            diff = self.diffSummary(lsum, rsum, key)
            payload["added-" + key] = self.getObjsFromKey(diff[0], key)
            payload["deleted-" + key] = diff[1]
            payload["missing-" + key] = diff[2]
            self.deleteObjsFromKey(diff[3], key)
        # handle the remainder
        if self.localTime > self.remoteTime:
            payload['history'] = self.bundleHistory()
            payload['sources'] = self.bundleSources()
            # finally, set new lastSync and bundle the deck info
            payload['deck'] = self.bundleDeck()
        return payload

    def applyPayload(self, payload):
        reply = {}
        # model, facts and cards
        for key in KEYS:
            k = 'added-' + key
            # send back any requested
            if k in payload:
                reply[k] = self.getObjsFromKey(
                    payload['missing-' + key], key)
                self.updateObjsFromKey(payload['added-' + key], key)
                self.deleteObjsFromKey(payload['deleted-' + key], key)
        # send back deck-related stuff if it wasn't sent to us
        if not 'deck' in payload:
            reply['history'] = self.bundleHistory()
            reply['sources'] = self.bundleSources()
            # finally, set new lastSync and bundle the deck info
            reply['deck'] = self.bundleDeck()
        else:
            self.updateDeck(payload['deck'])
            self.updateHistory(payload['history'])
            if 'sources' in payload:
                self.updateSources(payload['sources'])
        self.postSyncRefresh()
        cardIds = [x[0] for x in payload['added-cards']]
        self.deck.updateCardTags(cardIds)
        return reply

    def applyPayloadReply(self, reply):
        # model, facts and cards
        for key in KEYS:
            k = 'added-' + key
            # old version may not send media
            if k in reply:
                self.updateObjsFromKey(reply['added-' + key], key)
        # deck
        if 'deck' in reply:
            self.updateDeck(reply['deck'])
            self.updateHistory(reply['history'])
            if 'sources' in reply:
                self.updateSources(reply['sources'])
        self.postSyncRefresh()
        cardIds = [x[0] for x in reply['added-cards']]
        self.deck.updateCardTags(cardIds)
        if self.missingFacts() != 0:
            raise Exception(
                "Facts missing after sync. Please run Tools>Advanced>Check DB.")

    def missingFacts(self):
        return self.deck.db.scalar(
            "select count() from cards where factId "+
            "not in (select id from facts)")

    def postSyncRefresh(self):
        "Flush changes to DB, and reload object associations."
        self.deck.db.flush()
        self.deck.db.refresh(self.deck)
        self.deck.currentModel

    # Summaries
    ##########################################################################

    def summary(self, lastSync):
        "Generate a full summary of modtimes for two-way syncing."
        # client may have selected an earlier sync time
        self.deck.lastSync = lastSync
        # return early if there's been a schema change
        if self.deck.getFloat("schemaMod") > lastSync:
            return None
        d = {}
        cats = [
            # cards
            ("cards",
             "select id, modified from cards where modified > :m"),
            ("delcards",
             "select cardId, deletedTime from cardsDeleted "
             "where deletedTime > :m"),
            # facts
            ("facts",
             "select id, modified from facts where modified > :m"),
            ("delfacts",
             "select factId, deletedTime from factsDeleted "
             "where deletedTime > :m"),
            # models
            ("models",
             "select id, modified from models where modified > :m"),
            ("delmodels",
             "select modelId, deletedTime from modelsDeleted "
             "where deletedTime > :m"),
            # media
            ("media",
             "select id, created from media where created > :m"),
            ("delmedia",
             "select mediaId, deletedTime from mediaDeleted "
             "where deletedTime > :m")
            ]
        for (key, sql) in cats:
            if self.fullThreshold:
                sql += " limit %d" % self.fullThreshold
            ret = self.deck.db.all(sql, m=lastSync)
            if self.fullThreshold and self.fullThreshold == len(ret):
                # threshold exceeded, abort early
                return None
            d[key] = self.realLists(ret)
        return d

    # Diffing
    ##########################################################################

    def diffSummary(self, localSummary, remoteSummary, key):
        # list of ids on both ends
        lexists = localSummary[key]
        ldeleted = localSummary["del"+key]
        rexists = remoteSummary[key]
        rdeleted = remoteSummary["del"+key]
        ldeletedIds = dict(ldeleted)
        rdeletedIds = dict(rdeleted)
        # to store the results
        locallyEdited = []
        locallyDeleted = []
        remotelyEdited = []
        remotelyDeleted = []
        # build a hash of all ids, with value (localMod, remoteMod).
        # deleted/nonexisting cards are marked with a modtime of None.
        ids = {}
        for (id, mod) in rexists:
            ids[id] = [None, mod]
        for (id, mod) in rdeleted:
            ids[id] = [None, None]
        for (id, mod) in lexists:
            if id in ids:
                ids[id][0] = mod
            else:
                ids[id] = [mod, None]
        for (id, mod) in ldeleted:
            if id in ids:
                ids[id][0] = None
            else:
                ids[id] = [None, None]
        # loop through the hash, determining differences
        for (id, (localMod, remoteMod)) in ids.items():
            if localMod and remoteMod:
                # changed/existing on both sides
                if localMod < remoteMod:
                    remotelyEdited.append(id)
                elif localMod > remoteMod:
                    locallyEdited.append(id)
            elif localMod and not remoteMod:
                # if it's missing on server or newer here, sync
                if (id not in rdeletedIds or
                    rdeletedIds[id] < localMod):
                    locallyEdited.append(id)
                else:
                    remotelyDeleted.append(id)
            elif remoteMod and not localMod:
                # if it's missing locally or newer there, sync
                if (id not in ldeletedIds or
                    ldeletedIds[id] < remoteMod):
                    remotelyEdited.append(id)
                else:
                    locallyDeleted.append(id)
            else:
                if id in ldeletedIds and id not in rdeletedIds:
                    locallyDeleted.append(id)
                elif id in rdeletedIds and id not in ldeletedIds:
                    remotelyDeleted.append(id)
        return (locallyEdited, locallyDeleted,
                remotelyEdited, remotelyDeleted)

    # Models
    ##########################################################################

    def getModels(self, ids, updateModified=False):
        return [self.bundleModel(id, updateModified) for id in ids]

    def bundleModel(self, id, updateModified):
        "Return a model representation suitable for transport."
        mod = self.deck.db.query(Model).get(id)
        # force load of lazy attributes
        mod.fieldModels; mod.cardModels
        m = self.dictFromObj(mod)
        m['fieldModels'] = [self.bundleFieldModel(fm) for fm in m['fieldModels']]
        m['cardModels'] = [self.bundleCardModel(fm) for fm in m['cardModels']]
        if updateModified:
            m['modified'] = time.time()
        return m

    def bundleFieldModel(self, fm):
        d = self.dictFromObj(fm)
        if 'model' in d: del d['model']
        return d

    def bundleCardModel(self, cm):
        d = self.dictFromObj(cm)
        if 'model' in d: del d['model']
        return d

    def updateModels(self, models):
        for model in models:
            local = self.getModel(model['id'])
            # avoid overwriting any existing card/field models
            fms = model['fieldModels']; del model['fieldModels']
            cms = model['cardModels']; del model['cardModels']
            self.applyDict(local, model)
            self.mergeFieldModels(local, fms)
            self.mergeCardModels(local, cms)
        self.deck.db.execute(
            "delete from modelsDeleted where modelId in %s" %
            ids2str([m['id'] for m in models]))

    def getModel(self, id, create=True):
        "Return a local model with same ID, or create."
        id = int(id)
        for l in self.deck.models:
            if l.id == id:
                return l
        if not create:
            return
        m = Model()
        self.deck.models.append(m)
        return m

    def mergeFieldModels(self, model, fms):
        ids = []
        for fm in fms:
            local = self.getFieldModel(model, fm)
            self.applyDict(local, fm)
            ids.append(fm['id'])
        for fm in model.fieldModels:
            if fm.id not in ids:
                self.deck.deleteFieldModel(model, fm)

    def getFieldModel(self, model, remote):
        id = int(remote['id'])
        for fm in model.fieldModels:
            if fm.id == id:
                return fm
        fm = FieldModel()
        model.addFieldModel(fm)
        return fm

    def mergeCardModels(self, model, cms):
        ids = []
        for cm in cms:
            local = self.getCardModel(model, cm)
            if not 'allowEmptyAnswer' in cm or cm['allowEmptyAnswer'] is None:
                cm['allowEmptyAnswer'] = True
            self.applyDict(local, cm)
            ids.append(cm['id'])
        for cm in model.cardModels:
            if cm.id not in ids:
                self.deck.deleteCardModel(model, cm)

    def getCardModel(self, model, remote):
        id = int(remote['id'])
        for cm in model.cardModels:
            if cm.id == id:
                return cm
        cm = CardModel()
        model.addCardModel(cm)
        return cm

    def deleteModels(self, ids):
        for id in ids:
            model = self.getModel(id, create=False)
            if model:
                self.deck.deleteModel(model)

    # Facts
    ##########################################################################

    def getFacts(self, ids, updateModified=False):
        if updateModified:
            modified = time.time()
        else:
            modified = "modified"
        factIds = ids2str(ids)
        return {
            'facts': self.realLists(self.deck.db.all("""
select id, modelId, created, %s, tags, spaceUntil, lastCardId from facts
where id in %s""" % (modified, factIds))),
            'fields': self.realLists(self.deck.db.all("""
select id, factId, fieldModelId, ordinal, value, chksum from fields
where factId in %s""" % factIds))
            }

    def updateFacts(self, factsdict):
        facts = factsdict['facts']
        fields = factsdict['fields']
        if not facts:
            return
        # update facts first
        dlist = [{
            'id': f[0],
            'modelId': f[1],
            'created': f[2],
            'modified': f[3],
            'tags': f[4],
            'spaceUntil': f[5] or "",
            'lastCardId': f[6]
            } for f in facts]
        self.deck.db.execute("""
insert or replace into facts
(id, modelId, created, modified, tags, spaceUntil, lastCardId)
values
(:id, :modelId, :created, :modified, :tags, :spaceUntil, :lastCardId)""", dlist)
        # now fields
        def chksum(f):
            # older clients may not send the checksum column
            if len(f) > 5:
                return f[5]
            return self.deck.fieldChecksum(f[4])
        dlist = [{
            'id': f[0],
            'factId': f[1],
            'fieldModelId': f[2],
            'ordinal': f[3],
            'value': f[4],
            'chksum': chksum(f)
            } for f in fields]
        # delete local fields since ids may have changed
        self.deck.db.execute(
            "delete from fields where factId in %s" %
            ids2str([f[0] for f in facts]))
        # then update
        self.deck.db.execute("""
insert into fields
(id, factId, fieldModelId, ordinal, value, chksum)
values
(:id, :factId, :fieldModelId, :ordinal, :value, :chksum)""", dlist)
        self.deck.db.execute(
            "delete from factsDeleted where factId in %s" %
            ids2str([f[0] for f in facts]))

    def deleteFacts(self, ids):
        self.deck.deleteFacts(ids)

    # Cards
    ##########################################################################

    def getCards(self, ids):
        return self.realLists(self.deck.db.all("""
select id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
firstAnswered, reps, successive, averageTime, reviewTime, youngEase0,
youngEase1, youngEase2, youngEase3, youngEase4, matureEase0,
matureEase1, matureEase2, matureEase3, matureEase4, yesCount, noCount,
question, answer, lastFactor, spaceUntil, type, combinedDue, relativeDelay
from cards where id in %s""" % ids2str(ids)))

    def updateCards(self, cards):
        if not cards:
            return
        dlist = [{'id': c[0],
                  'factId': c[1],
                  'cardModelId': c[2],
                  'created': c[3],
                  'modified': c[4],
                  'tags': c[5],
                  'ordinal': c[6],
                  'priority': c[7],
                  'interval': c[8],
                  'lastInterval': c[9],
                  'due': c[10],
                  'lastDue': c[11],
                  'factor': c[12],
                  'firstAnswered': c[13],
                  'reps': c[14],
                  'successive': c[15],
                  'averageTime': c[16],
                  'reviewTime': c[17],
                  'youngEase0': c[18],
                  'youngEase1': c[19],
                  'youngEase2': c[20],
                  'youngEase3': c[21],
                  'youngEase4': c[22],
                  'matureEase0': c[23],
                  'matureEase1': c[24],
                  'matureEase2': c[25],
                  'matureEase3': c[26],
                  'matureEase4': c[27],
                  'yesCount': c[28],
                  'noCount': c[29],
                  'question': c[30],
                  'answer': c[31],
                  'lastFactor': c[32],
                  'spaceUntil': c[33],
                  'type': c[34],
                  'combinedDue': c[35],
                  'rd': c[36],
                  } for c in cards]
        self.deck.db.execute("""
insert or replace into cards
(id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
firstAnswered, reps, successive, averageTime, reviewTime, youngEase0,
youngEase1, youngEase2, youngEase3, youngEase4, matureEase0,
matureEase1, matureEase2, matureEase3, matureEase4, yesCount, noCount,
question, answer, lastFactor, spaceUntil, type, combinedDue,
relativeDelay, isDue)
values
(:id, :factId, :cardModelId, :created, :modified, :tags, :ordinal,
:priority, :interval, :lastInterval, :due, :lastDue, :factor,
:firstAnswered, :reps, :successive, :averageTime, :reviewTime, :youngEase0,
:youngEase1, :youngEase2, :youngEase3, :youngEase4, :matureEase0,
:matureEase1, :matureEase2, :matureEase3, :matureEase4, :yesCount,
:noCount, :question, :answer, :lastFactor, :spaceUntil,
:type, :combinedDue, :rd, 0)""", dlist)
        self.deck.db.execute(
            "delete from cardsDeleted where cardId in %s" %
            ids2str([c[0] for c in cards]))

    def deleteCards(self, ids):
        self.deck.deleteCards(ids)

    # Deck/history
    ##########################################################################

    def bundleDeck(self):
        # ensure modified is not greater than server time
        if getattr(self, "server", None) and getattr(
            self.server, "timestamp", None):
            self.deck.modified = min(self.deck.modified, self.server.timestamp)
        # and ensure lastSync is greater than modified
        self.deck.lastSync = max(time.time(), self.deck.modified+1)
        d = self.dictFromObj(self.deck)
        for bad in ("Session", "engine", "s", "db", "path", "syncName",
                    "version", "newQueue", "failedQueue", "revQueue",
                    "css", "models", "currentModel"):
            if bad in d:
                del d[bad]
        keys = d.keys()
        for k in keys:
            if isinstance(d[k], types.MethodType):
                del d[k]
        d['meta'] = self.realLists(self.deck.db.all("select * from deckVars"))
        return d

    def updateDeck(self, deck):
        if 'meta' in deck:
            meta = deck['meta']
            for (k,v) in meta:
                self.deck.db.execute("""
insert or replace into deckVars
(key, value) values (:k, :v)""", k=k, v=v)
            del deck['meta']
        self.applyDict(self.deck, deck)

    def bundleHistory(self):
        return self.realLists(self.deck.db.all("""
select * from revlog where time > :ls""",
            ls=self.deck.lastSync))

    def updateHistory(self, history):
        dlist = [{'time': h[0],
                  'cardId': h[1],
                  'ease': h[2],
                  'rep': h[3],
                  'lastInterval': h[4],
                  'interval': h[5],
                  'factor': h[6],
                  'userTime': h[7],
                  'flags': h[8]} for h in history]
        if not dlist:
            return
        self.deck.db.execute("""
insert or ignore into revlog values
(:time, :cardId, :ease, :rep, :lastInterval, :interval, :factor,
:userTime, :flags)""",
            dlist)

    def bundleSources(self):
        return self.realLists(self.deck.db.all("select * from sources"))

    def updateSources(self, sources):
        for s in sources:
            self.deck.db.execute("""
insert or replace into sources values
(:id, :name, :created, :lastSync, :syncPeriod)""",
                id=s[0],
                name=s[1],
                created=s[2],
                lastSync=s[3],
                syncPeriod=s[4])

    # Media metadata
    ##########################################################################

    def getMedia(self, ids):
        return [tuple(row) for row in self.deck.db.all("""
select id, filename, size, created, originalPath, description
from media where id in %s""" % ids2str(ids))]

    def updateMedia(self, media):
        meta = []
        for m in media:
            # build meta
            meta.append({
                'id': m[0],
                'filename': m[1],
                'size': m[2],
                'created': m[3],
                'originalPath': m[4],
                'description': m[5]})
        # apply metadata
        if meta:
            self.deck.db.execute("""
insert or replace into media (id, filename, size, created,
originalPath, description)
values (:id, :filename, :size, :created, :originalPath,
:description)""", meta)
        self.deck.db.execute(
            "delete from mediaDeleted where mediaId in %s" %
            ids2str([m[0] for m in media]))

    def deleteMedia(self, ids):
        sids = ids2str(ids)
        files = self.deck.db.column0(
            "select filename from media where id in %s" % sids)
        self.deck.db.execute("""
insert into mediaDeleted
select id, :now from media
where media.id in %s""" % sids, now=time.time())
        self.deck.db.execute(
            "delete from media where id in %s" % sids)

    # One-way syncing (sharing)
    ##########################################################################

    def syncOneWay(self, lastSync):
        "Sync two decks one way."
        payload = self.server.genOneWayPayload(lastSync)
        self.applyOneWayPayload(payload)
        self.deck.reset()

    def syncOneWayDeckName(self):
        return (self.deck.s.scalar("select name from sources where id = :id",
                                   id=self.server.deckName) or
                hexifyID(int(self.server.deckName)))

    def prepareOneWaySync(self):
        "Sync setup. True if sync needed. Not used for local sync."
        srcID = self.server.deckName
        (lastSync, syncPeriod) = self.deck.s.first(
            "select lastSync, syncPeriod from sources where id = :id", id=srcID)
        if self.server.modified() <= lastSync:
            return
        self.deck.lastSync = lastSync
        return True

    def genOneWayPayload(self, lastSync):
        "Bundle all added or changed objects since the last sync."
        p = {}
        # facts
        factIds = self.deck.s.column0(
            "select id from facts where modified > :l", l=lastSync)
        p['facts'] = self.getFacts(factIds, updateModified=True)
        # models
        modelIds = self.deck.s.column0(
            "select id from models where modified > :l", l=lastSync)
        p['models'] = self.getModels(modelIds, updateModified=True)
        # media
        mediaIds = self.deck.s.column0(
            "select id from media where created > :l", l=lastSync)
        p['media'] = self.getMedia(mediaIds)
        # cards
        cardIds = self.deck.s.column0(
            "select id from cards where modified > :l", l=lastSync)
        p['cards'] = self.realLists(self.getOneWayCards(cardIds))
        return p

    def applyOneWayPayload(self, payload):
        keys = [k for k in KEYS if k != "cards"]
        # model, facts, media
        for key in keys:
            self.updateObjsFromKey(payload[key], key)
        # models need their source tagged
        for m in payload["models"]:
            self.deck.s.statement("update models set source = :s "
                                  "where id = :id",
                                  s=self.server.deckName,
                                  id=m['id'])
        # cards last, handled differently
        t = time.time()
        try:
            self.updateOneWayCards(payload['cards'])
        except KeyError:
            sys.stderr.write("Subscribed to a broken deck. "
                             "Try removing your deck subscriptions.")
            t = 0
        # update sync time
        self.deck.s.statement(
            "update sources set lastSync = :t where id = :id",
            id=self.server.deckName, t=t)
        self.deck.modified = time.time()

    def getOneWayCards(self, ids):
        "The minimum information necessary to generate one way cards."
        return self.deck.s.all(
            "select id, factId, cardModelId, ordinal, created from cards "
            "where id in %s" % ids2str(ids))

    def updateOneWayCards(self, cards):
        if not cards:
            return
        t = time.time()
        dlist = [{'id': c[0], 'factId': c[1], 'cardModelId': c[2],
                  'ordinal': c[3], 'created': c[4], 't': t} for c in cards]
        # add any missing cards
        self.deck.s.statements("""
insert or ignore into cards
(id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
firstAnswered, reps, successive, averageTime, reviewTime, youngEase0,
youngEase1, youngEase2, youngEase3, youngEase4, matureEase0,
matureEase1, matureEase2, matureEase3, matureEase4, yesCount, noCount,
question, answer, lastFactor, spaceUntil, isDue, type, combinedDue,
relativeDelay)
values
(:id, :factId, :cardModelId, :created, :t, "", :ordinal,
1, 0, 0, :created, 0, 2.5,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, "", "", 2.5, 0, 0, 2, :t, 2)""", dlist)
        # update q/as
        models = dict(self.deck.s.all("""
select cards.id, models.id
from cards, facts, models
where cards.factId = facts.id
and facts.modelId = models.id
and cards.id in %s""" % ids2str([c[0] for c in cards])))
        self.deck.s.flush()
        self.deck.updateCardQACache(
            [(c[0], c[2], c[1], models[c[0]]) for c in cards])
        # rebuild priorities on client
        cardIds = [c[0] for c in cards]
        self.deck.updateCardTags(cardIds)
        self.rebuildPriorities(cardIds)

    # Tools
    ##########################################################################

    def modified(self):
        return self.deck.modified

    def _lastSync(self):
        return self.deck.lastSync

    def unstuff(self, data):
        "Uncompress and convert to unicode."
        return simplejson.loads(unicode(zlib.decompress(data), "utf8"))

    def stuff(self, data):
        "Convert into UTF-8 and compress."
        return zlib.compress(simplejson.dumps(data))

    def dictFromObj(self, obj):
        "Return a dict representing OBJ without any hidden db fields."
        return dict([(k,v) for (k,v) in obj.__dict__.items()
                     if not k.startswith("_")])

    def applyDict(self, obj, dict):
        "Apply each element in DICT to OBJ in a way the ORM notices."
        for (k,v) in dict.items():
            setattr(obj, k, v)

    def realLists(self, result):
        "Convert an SQLAlchemy response into a list of real lists."
        return [list(x) for x in result]

    def getObjsFromKey(self, ids, key):
        return getattr(self, "get" + key.capitalize())(ids)

    def deleteObjsFromKey(self, ids, key):
        return getattr(self, "delete" + key.capitalize())(ids)

    def updateObjsFromKey(self, ids, key):
        return getattr(self, "update" + key.capitalize())(ids)

    # Full sync
    ##########################################################################

    def needFullSync(self, sums):
        if self.deck.lastSync <= 0:
            return True
        for sum in sums:
            for l in sum.values():
                if len(l) > 1000:
                    return True
        if self.deck.db.scalar(
            "select count() from revlog where time > :ls",
            ls=self.deck.lastSync) > 1000:
            return True
        lastDay = date.fromtimestamp(max(0, self.deck.lastSync - 60*60*24))
        return False

    def prepareFullSync(self):
        t = time.time()
        # ensure modified is not greater than server time
        self.deck.modified = min(self.deck.modified, self.server.timestamp)
        self.deck.db.commit()
        self.deck.close()
        fields = {
            "p": self.server.password,
            "u": self.server.username,
            "d": self.server.deckName.encode("utf-8"),
            }
        if self.localTime > self.remoteTime:
            return ("fromLocal", fields, self.deck.path)
        else:
            return ("fromServer", fields, self.deck.path)

    def fullSync(self):
        ret = self.prepareFullSync()
        if ret[0] == "fromLocal":
            self.fullSyncFromLocal(ret[1], ret[2])
        else:
            self.fullSyncFromServer(ret[1], ret[2])

    def fullSyncFromLocal(self, fields, path):
        global sendProgressHook
        try:
            # write into a temporary file, since POST needs content-length
            src = open(path, "rb")
            (fd, name) = tempfile.mkstemp(prefix="anki")
            tmp = open(name, "w+b")
            # post vars
            for (key, value) in fields.items():
                tmp.write('--' + MIME_BOUNDARY + "\r\n")
                tmp.write('Content-Disposition: form-data; name="%s"\r\n' % key)
                tmp.write('\r\n')
                tmp.write(value)
                tmp.write('\r\n')
            # file header
            tmp.write('--' + MIME_BOUNDARY + "\r\n")
            tmp.write(
                'Content-Disposition: form-data; name="deck"; filename="deck"\r\n')
            tmp.write('Content-Type: application/octet-stream\r\n')
            tmp.write('\r\n')
            # data
            comp = zlib.compressobj()
            while 1:
                data = src.read(CHUNK_SIZE)
                if not data:
                    tmp.write(comp.flush())
                    break
                tmp.write(comp.compress(data))
            src.close()
            tmp.write('\r\n--' + MIME_BOUNDARY + '--\r\n\r\n')
            size = tmp.tell()
            tmp.seek(0)
            # open http connection
            runHook("fullSyncStarted", size)
            headers = {
                'Content-type': 'multipart/form-data; boundary=%s' %
                MIME_BOUNDARY,
                'Content-length': str(size),
                'Host': SYNC_HOST,
                }
            req = urllib2.Request(SYNC_URL + "fullup?v=2", tmp, headers)
            try:
                sendProgressHook = fullSyncProgressHook
                res = urllib2.urlopen(req).read()
                assert res.startswith("OK")
                # update lastSync
                c = sqlite.connect(path)
                c.execute("update decks set lastSync = ?",
                          (res[3:],))
                c.commit()
                c.close()
            finally:
                sendProgressHook = None
                tmp.close()
                os.close(fd)
                os.unlink(name)
        finally:
            runHook("fullSyncFinished")

    def fullSyncFromServer(self, fields, path):
        try:
            runHook("fullSyncStarted", 0)
            fields = urllib.urlencode(fields)
            src = urllib.urlopen(SYNC_URL + "fulldown", fields)
            (fd, tmpname) = tempfile.mkstemp(dir=os.path.dirname(path),
                                             prefix="fullsync")
            tmp = open(tmpname, "wb")
            decomp = zlib.decompressobj()
            cnt = 0
            while 1:
                data = src.read(CHUNK_SIZE)
                if not data:
                    tmp.write(decomp.flush())
                    break
                tmp.write(decomp.decompress(data))
                cnt += CHUNK_SIZE
                runHook("fullSyncProgress", "fromServer", cnt)
            src.close()
            tmp.close()
            os.close(fd)
            # if we were successful, overwrite old deck
            os.unlink(path)
            os.rename(tmpname, path)
            # reset the deck name
            c = sqlite.connect(path)
            c.execute("update decks set syncName = ?",
                      [checksum(path.encode("utf-8"))])
            c.commit()
            c.close()
        finally:
            runHook("fullSyncFinished")

# Local syncing
##########################################################################


class SyncServer(SyncTools):

    def __init__(self, deck=None):
        SyncTools.__init__(self, deck)

class SyncClient(SyncTools):

    pass

# HTTP proxy: act as a server and direct requests to the real server
##########################################################################

class HttpSyncServerProxy(SyncServer):

    def __init__(self, user, passwd):
        SyncServer.__init__(self)
        self.decks = None
        self.deckName = None
        self.username = user
        self.password = passwd
        self.protocolVersion = 5
        self.sourcesToCheck = []

    def connect(self, clientVersion=""):
        "Check auth, protocol & grab deck list."
        if not self.decks:
            import socket
            socket.setdefaulttimeout(30)
            d = self.runCmd("getDecks",
                            libanki=anki.version,
                            client=clientVersion,
                            sources=simplejson.dumps(self.sourcesToCheck),
                            pversion=self.protocolVersion)
            socket.setdefaulttimeout(None)
            if d['status'] != "OK":
                raise SyncError(type="authFailed", status=d['status'])
            self.decks = d['decks']
            self.timestamp = d['timestamp']
            self.timediff = abs(self.timestamp - time.time())

    def hasDeck(self, deckName):
        self.connect()
        return deckName in self.decks.keys()

    def availableDecks(self):
        self.connect()
        return self.decks.keys()

    def createDeck(self, deckName):
        ret = self.runCmd("createDeck", name=deckName.encode("utf-8"))
        if not ret or ret['status'] != "OK":
            raise SyncError(type="createFailed")
        self.decks[deckName] = [0, 0]

    def summary(self, lastSync):
        return self.runCmd("summary",
                           lastSync=self.stuff(lastSync))

    def genOneWayPayload(self, lastSync):
        return self.runCmd("genOneWayPayload",
                           lastSync=self.stuff(lastSync))

    def modified(self):
        self.connect()
        return self.decks[self.deckName][0]

    def _lastSync(self):
        self.connect()
        return self.decks[self.deckName][1]

    def applyPayload(self, payload):
        return self.runCmd("applyPayload",
                           payload=self.stuff(payload))

    def finish(self):
        assert self.runCmd("finish") == "OK"

    def runCmd(self, action, **args):
        data = {"p": self.password,
                "u": self.username,
                "v": 2}
        if self.deckName:
            data['d'] = self.deckName.encode("utf-8")
        else:
            data['d'] = None
        data.update(args)
        data = urllib.urlencode(data)
        try:
            f = urllib2.urlopen(SYNC_URL + action, data)
        except (urllib2.URLError, socket.error, socket.timeout,
                httplib.BadStatusLine), e:
            raise SyncError(type="connectionError",
                            exc=`e`)
        ret = f.read()
        if not ret:
            raise SyncError(type="noResponse")
        try:
            return self.unstuff(ret)
        except Exception, e:
            raise SyncError(type="connectionError",
                            exc=`e`)

# HTTP server: respond to proxy requests and return data
##########################################################################

class HttpSyncServer(SyncServer):
    def __init__(self):
        SyncServer.__init__(self)
        self.decks = {}
        self.deck = None

    def summary(self, lastSync):
        return self.stuff(SyncServer.summary(
            self, float(zlib.decompress(lastSync))))

    def applyPayload(self, payload):
        return self.stuff(SyncServer.applyPayload(self,
            self.unstuff(payload)))

    def genOneWayPayload(self, lastSync):
        return self.stuff(SyncServer.genOneWayPayload(
            self, float(zlib.decompress(lastSync))))

    def getDecks(self, libanki, client, sources, pversion):
        return self.stuff({
            "status": "OK",
            "decks": self.decks,
            "timestamp": time.time(),
            })

    def createDeck(self, name):
        "Create a deck on the server. Not implemented."
        return self.stuff("OK")

# Local media copying
##########################################################################

def copyLocalMedia(src, dst):
    srcDir = src.mediaDir()
    if not srcDir:
        return
    dstDir = dst.mediaDir(create=True)
    files = os.listdir(srcDir)
    # find media references
    used = {}
    for col in ("question", "answer"):
        txt = dst.s.column0("""
select %(c)s from cards where
%(c)s like '%%<img %%'
or %(c)s like '%%[sound:%%'""" % {'c': col})
        for entry in txt:
            for fname in mediaFiles(entry):
                used[fname] = True
    # copy only used media
    for file in files:
        if file not in used:
            continue
        srcfile = os.path.join(srcDir, file)
        dstfile = os.path.join(dstDir, file)
        if not os.path.exists(dstfile):
            try:
                shutil.copy2(srcfile, dstfile)
            except (IOError, OSError):
                pass