extract and flag AV tags as part of the render process

We can now show replay buttons for the audio contained in {{FrontSide}}
without having to play it again when the answer is shown.

The template code now always defers FrontSide rendering, as inlining it
saved little and meant the logic had to be implemented twice.
Damien Elmes, 2020-01-24 11:06:11 +10:00
parent e3fb184a84
commit 17ebdfc099
18 changed files with 205 additions and 216 deletions
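To orient the diff below: rendering now rewrites each [sound:...] or [anki:tts]...[/anki:tts] tag into a numbered [anki:play:side:index] reference, returning the extracted tags alongside the rewritten text. A minimal Python sketch of the idea (illustration only: it handles just [sound:...] tags, and extract_av_refs is a made-up name; the real extraction lives in the Rust text module below):

    import re
    from typing import List, Tuple

    SOUND_RE = re.compile(r"\[sound:(.+?)\]")

    def extract_av_refs(text: str, question_side: bool) -> Tuple[str, List[str]]:
        # replace each [sound:...] tag with a numbered reference,
        # collecting the filenames in order as we go
        side = "q" if question_side else "a"
        tags: List[str] = []

        def repl(match: re.Match) -> str:
            tags.append(match.group(1))
            return f"[anki:play:{side}:{len(tags) - 1}]"

        return SOUND_RE.sub(repl, text), tags

    text, tags = extract_av_refs("hi[sound:a.mp3]bye[sound:b.mp3]", True)
    assert text == "hi[anki:play:q:0]bye[anki:play:q:1]"
    assert tags == ["a.mp3", "b.mp3"]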

View file

@@ -17,8 +17,7 @@ message BackendInput {
         RenderCardIn render_card = 21;
         int64 local_minutes_west = 22;
         string strip_av_tags = 23;
-        string get_av_tags = 24;
-        string flag_av_tags = 25;
+        ExtractAVTagsIn extract_av_tags = 24;
     }
 }
@@ -32,8 +31,7 @@ message BackendOutput {
         RenderCardOut render_card = 21;
         sint32 local_minutes_west = 22;
         string strip_av_tags = 23;
-        GetAVTagsOut get_av_tags = 24;
-        string flag_av_tags = 25;
+        ExtractAVTagsOut extract_av_tags = 24;
         BackendError error = 2047;
     }
@@ -149,8 +147,14 @@ message RenderedTemplateReplacement {
     repeated string filters = 3;
 }
 
-message GetAVTagsOut {
-    repeated AVTag av_tags = 1;
+message ExtractAVTagsIn {
+    string text = 1;
+    bool question_side = 2;
+}
+
+message ExtractAVTagsOut {
+    string text = 1;
+    repeated AVTag av_tags = 2;
 }
 
 message AVTag {

View file

@@ -5,12 +5,13 @@ from __future__ import annotations
 import pprint
 import time
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 import anki  # pylint: disable=unused-import
 from anki import hooks
 from anki.consts import *
 from anki.notes import Note
+from anki.sound import AVTag
 from anki.utils import intTime, joinFields, timestampID
 
 # Cards
@@ -26,7 +27,7 @@ from anki.utils import intTime, joinFields, timestampID
 class Card:
-    _qa: Optional[Dict[str, Union[str, int]]]
+    _qa: Optional[Dict[str, Union[str, int, List[AVTag]]]]
     _note: Optional[Note]
     timerStarted: Optional[float]
     lastIvl: int
@@ -149,6 +150,12 @@ lapses=?, left=?, odue=?, odid=?, did=? where id = ?""",
     def a(self) -> str:
         return self.css() + self._getQA()["a"]
 
+    def question_av_tags(self) -> List[AVTag]:
+        return self._qa["q_av_tags"]  # type: ignore
+
+    def answer_av_tags(self) -> List[AVTag]:
+        return self._qa["a_av_tags"]  # type: ignore
+
     def css(self) -> str:
         return "<style>%s</style>" % self.model()["css"]

View file

@@ -30,8 +30,9 @@ from anki.notes import Note
 from anki.rsbackend import RustBackend
 from anki.sched import Scheduler as V1Scheduler
 from anki.schedv2 import Scheduler as V2Scheduler
+from anki.sound import AVTag
 from anki.tags import TagManager
-from anki.template import QAData, TemplateRenderContext, render_card
+from anki.template import QAData, RenderOutput, TemplateRenderContext, render_card
 from anki.utils import (
     devMode,
     fieldChecksum,
@@ -633,7 +634,7 @@ where c.nid = n.id and c.id in %s group by nid"""
     # data is [cid, nid, mid, did, ord, tags, flds, cardFlags]
     def _renderQA(
         self, data: QAData, qfmt: Optional[str] = None, afmt: Optional[str] = None
-    ) -> Dict[str, Union[str, int]]:
+    ) -> Dict[str, Union[str, int, List[AVTag]]]:
         # extract info from data
         split_fields = splitFields(data[6])
         card_ord = data[4]
@@ -671,31 +672,44 @@ where c.nid = n.id and c.id in %s group by nid"""
         # render fields. if any custom filters are encountered,
         # the field_filter hook will be called.
         try:
-            qtext, atext = render_card(self, qfmt, afmt, ctx)
+            output = render_card(self, qfmt, afmt, ctx)
         except anki.rsbackend.BackendException as e:
             errmsg = _("Card template has a problem:") + f"<br>{e}"
-            qtext = errmsg
-            atext = errmsg
+            output = RenderOutput(
+                question_text=errmsg,
+                answer_text=errmsg,
+                question_av_tags=[],
+                answer_av_tags=[],
+            )
 
         # avoid showing the user a confusing blank card if they've
         # forgotten to add a cloze deletion
         if model["type"] == MODEL_CLOZE:
             if not self.models._availClozeOrds(model, data[6], False):
-                qtext = (
-                    qtext
-                    + "<p>"
-                    + _("Please edit this note and add some cloze deletions. (%s)")
-                    % ("<a href=%s#cloze>%s</a>" % (HELP_SITE, _("help")))
-                )
+                output.question_text += "<p>" + _(
+                    "Please edit this note and add some cloze deletions. (%s)"
+                ) % ("<a href=%s#cloze>%s</a>" % (HELP_SITE, _("help")))
 
         # allow add-ons to modify the generated result
-        (qtext, atext) = hooks.card_did_render((qtext, atext), ctx)
+        (output.question_text, output.answer_text) = hooks.card_did_render(
+            (output.question_text, output.answer_text), ctx
+        )
 
         # legacy hook
-        qtext = runFilter("mungeQA", qtext, "q", fields, model, data, self)
-        atext = runFilter("mungeQA", atext, "a", fields, model, data, self)
+        output.question_text = runFilter(
+            "mungeQA", output.question_text, "q", fields, model, data, self
+        )
+        output.answer_text = runFilter(
+            "mungeQA", output.answer_text, "a", fields, model, data, self
+        )
 
-        return dict(q=qtext, a=atext, id=card_id)
+        return dict(
+            q=output.question_text,
+            a=output.answer_text,
+            id=card_id,
+            q_av_tags=output.question_av_tags,
+            a_av_tags=output.answer_av_tags,
+        )
 
     def _qaData(self, where="") -> Any:
         "Return [cid, nid, mid, did, ord, tags, flds, cardFlags] db query"

View file

@@ -161,15 +161,16 @@ class RustBackend:
     def strip_av_tags(self, text: str) -> str:
         return self._run_command(pb.BackendInput(strip_av_tags=text)).strip_av_tags
 
-    def get_av_tags(self, text: str) -> List[AVTag]:
-        return list(
-            map(
-                av_tag_to_native,
-                self._run_command(
-                    pb.BackendInput(get_av_tags=text)
-                ).get_av_tags.av_tags,
-            )
-        )
-
-    def flag_av_tags(self, text: str) -> str:
-        return self._run_command(pb.BackendInput(flag_av_tags=text)).flag_av_tags
+    def extract_av_tags(
+        self, text: str, question_side: bool
+    ) -> Tuple[str, List[AVTag]]:
+        out = self._run_command(
+            pb.BackendInput(
+                extract_av_tags=pb.ExtractAVTagsIn(
+                    text=text, question_side=question_side
+                )
+            )
+        ).extract_av_tags
+        native_tags = list(map(av_tag_to_native, out.av_tags))
+        return out.text, native_tags
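A single round trip now returns both the rewritten text and the native tags (sketch; assumes an open collection col):

    text, tags = col.backend.extract_av_tags("x[sound:a.mp3]", question_side=True)
    # text == "x[anki:play:q:0]"
    # tags == [SoundOrVideoTag(filename="a.mp3")]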

View file

@@ -4,8 +4,7 @@
 """
 Sound/TTS references extracted from card text.
 
-Use collection.backend.strip_av_tags(string) to remove all tags,
-and collection.backend.get_av_tags(string) to get a list of AVTags.
+These can be accessed via eg card.question_av_tags()
 """
 
 from __future__ import annotations

View file

@@ -29,12 +29,14 @@ template_legacy.py file, using the legacy addHook() system.
 from __future__ import annotations
 
 import re
+from dataclasses import dataclass
 from typing import Any, Dict, List, Optional, Tuple
 
 import anki
 from anki import hooks
 from anki.models import NoteType
 from anki.rsbackend import TemplateReplacementList
+from anki.sound import AVTag
 
 QAData = Tuple[
     # Card ID this QA comes from. Corresponds to 'cid' column.
@@ -122,18 +124,34 @@ class TemplateRenderContext:
         return self._note_type
 
 
+@dataclass
+class RenderOutput:
+    question_text: str
+    answer_text: str
+    question_av_tags: List[AVTag]
+    answer_av_tags: List[AVTag]
+
+
 def render_card(
     col: anki.storage._Collection, qfmt: str, afmt: str, ctx: TemplateRenderContext
-) -> Tuple[str, str]:
-    """Renders the provided templates, returning rendered q & a text.
+) -> RenderOutput:
+    """Renders the provided templates, returning rendered output.
 
     Will raise if the template is invalid."""
     (qnodes, anodes) = col.backend.render_card(qfmt, afmt, ctx.fields(), ctx.card_ord())
 
     qtext = apply_custom_filters(qnodes, ctx, front_side=None)
+    qtext, q_avtags = col.backend.extract_av_tags(qtext, True)
+
     atext = apply_custom_filters(anodes, ctx, front_side=qtext)
-    return qtext, atext
+    atext, a_avtags = col.backend.extract_av_tags(atext, False)
+
+    return RenderOutput(
+        question_text=qtext,
+        answer_text=atext,
+        question_av_tags=q_avtags,
+        answer_av_tags=a_avtags,
+    )
 
 
 def apply_custom_filters(
@@ -153,7 +171,7 @@ def apply_custom_filters(
         else:
             # do we need to inject in FrontSide?
             if node.field_name == "FrontSide" and front_side is not None:
-                node.current_text = ctx.col().backend.strip_av_tags(front_side)
+                node.current_text = front_side
 
             field_text = node.current_text
             for filter_name in node.filters:
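Note the ordering: the question side is extracted before being injected as FrontSide, so its [anki:play:q:N] refs pass through the answer-side extraction untouched (the extraction regex does not match them), and answer_av_tags() ends up holding only the answer's own audio. A hedged illustration against the backend (assumes an open collection col):

    qtext, q_tags = col.backend.extract_av_tags("hi[sound:a.mp3]", True)
    # qtext == "hi[anki:play:q:0]"
    atext, a_tags = col.backend.extract_av_tags(qtext + "<hr>bye[sound:b.mp3]", False)
    # atext == "hi[anki:play:q:0]<hr>bye[anki:play:a:0]"
    # a_tags == [SoundOrVideoTag(filename="b.mp3")] -- the FrontSide audio is
    # not duplicated, which is what lets the answer reuse the question's refs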

View file

@@ -142,7 +142,7 @@ def test_furigana():
     # and should avoid sound
     n["Front"] = "foo[sound:abc.mp3]"
     n.flush()
-    assert "sound:" in c.q(reload=True)
+    assert "anki:play" in c.q(reload=True)
     # it shouldn't throw an error while people are editing
     m["tmpls"][0]["qfmt"] = "{{kana:}}"
     mm.save(m)

View file

@ -32,7 +32,6 @@ from aqt.utils import (
askUser, askUser,
getOnlyText, getOnlyText,
getTag, getTag,
mungeQA,
openHelp, openHelp,
qtMenuShortcutWorkaround, qtMenuShortcutWorkaround,
restoreGeom, restoreGeom,
@ -1682,9 +1681,9 @@ where id in %s"""
# need to force reload even if answer # need to force reload even if answer
txt = c.q(reload=True) txt = c.q(reload=True)
questionAudio = [] question_audio = []
if self._previewBothSides: if self._previewBothSides:
questionAudio = self.mw.col.backend.get_av_tags(txt) question_audio = c.question_av_tags()
if self._previewState == "answer": if self._previewState == "answer":
func = "_showAnswer" func = "_showAnswer"
txt = c.a() txt = c.a()
@ -1694,13 +1693,13 @@ where id in %s"""
if self.mw.reviewer.autoplay(c): if self.mw.reviewer.autoplay(c):
# if we're showing both sides at once, play question audio first # if we're showing both sides at once, play question audio first
av_player.play_tags(questionAudio) av_player.play_tags(question_audio)
# then play any audio that hasn't already been played # then play any audio that hasn't already been played
answer_audio = self.mw.col.backend.get_av_tags(txt) answer_audio = c.answer_av_tags()
unplayed_audio = [x for x in answer_audio if x not in questionAudio] unplayed_audio = [x for x in answer_audio if x not in question_audio]
av_player.extend_and_play(unplayed_audio) av_player.extend_and_play(unplayed_audio)
txt = mungeQA(self.col, txt) txt = self.mw.prepare_card_text_for_display(txt)
gui_hooks.card_will_show( gui_hooks.card_will_show(
txt, c, "preview" + self._previewState.capitalize() txt, c, "preview" + self._previewState.capitalize()
) )

View file

@@ -5,8 +5,10 @@
 import collections
 import json
 import re
+from typing import Optional
 
 import aqt
+from anki.cards import Card
 from anki.consts import *
 from anki.lang import _, ngettext
 from anki.utils import isMac, isWin, joinFields
@@ -18,7 +20,6 @@ from aqt.utils import (
     askUser,
     downArrow,
     getOnlyText,
-    mungeQA,
     openHelp,
     restoreGeom,
     saveGeom,
@@ -29,6 +30,8 @@ from aqt.webview import AnkiWebView
 class CardLayout(QDialog):
+    card: Optional[Card]
+
     def __init__(self, mw, note, ord=0, parent=None, addMode=False):
         QDialog.__init__(self, parent or mw, Qt.Window)
         mw.setupDialogGC(self)
@@ -339,10 +342,10 @@ Please create a new card type first."""
         bodyclass = theme_manager.body_classes_for_card_ord(c.ord)
 
-        q = ti(mungeQA(self.mw.col, c.q(reload=True)))
+        q = ti(self.mw.prepare_card_text_for_display(c.q(reload=True)))
         q = gui_hooks.card_will_show(q, c, "clayoutQuestion")
 
-        a = ti(mungeQA(self.mw.col, c.a()), type="a")
+        a = ti(self.mw.prepare_card_text_for_display(c.a()), type="a")
         a = gui_hooks.card_will_show(a, c, "clayoutAnswer")
 
         # use _showAnswer to avoid the longer delay
@@ -350,7 +353,7 @@ Please create a new card type first."""
         self.pform.backWeb.eval("_showAnswer(%s, '%s');" % (json.dumps(a), bodyclass))
 
         if c.id not in self.playedAudio:
-            av_player.play_from_text(self.mw.col, c.q() + c.a())
+            av_player.play_tags(c.question_av_tags() + c.answer_av_tags())
             self.playedAudio[c.id] = True
 
         self.updateCardNames()

View file

@@ -24,11 +24,8 @@ def bodyClass(col, card) -> str:
 def allSounds(text) -> List:
     print("allSounds() deprecated")
-    return [
-        x.filename
-        for x in aqt.mw.col.backend.get_av_tags(text)
-        if isinstance(x, SoundOrVideoTag)
-    ]
+    text, tags = aqt.mw.col.backend.extract_av_tags(text, True)
+    return [x.filename for x in tags if isinstance(x, SoundOrVideoTag)]
 
 
 def stripSounds(text) -> str:

View file

@@ -400,14 +400,17 @@ close the profile or restart Anki."""
     def setupSound(self) -> None:
         aqt.sound.setup_audio(self.taskman, self.pm.base)
 
-    def process_av_tags(self, text: str) -> Tuple[str, List[AVTag]]:
-        "Return card text with play buttons added, and the extracted AV tags."
-        tags = self.col.backend.get_av_tags(text)
+    def _add_play_buttons(self, text: str) -> str:
+        "Return card text with play buttons added, or stripped."
         if self.pm.profile.get("showPlayButtons", True):
-            text = aqt.sound.av_flags_to_html(self.col.backend.flag_av_tags(text))
+            return aqt.sound.av_refs_to_play_icons(text)
         else:
-            text = self.col.backend.strip_av_tags(text)
-        return (text, tags)
+            return aqt.sound.strip_av_refs(text)
+
+    def prepare_card_text_for_display(self, text: str) -> str:
+        text = self.col.media.escapeImages(text)
+        text = self._add_play_buttons(text)
+        return text
 
     # Collection load/unload
     ##########################################################################
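Callers that previously used mungeQA(col, txt) now go through the main window. A minimal sketch (assumes a running GUI with aqt.mw set, and a rendered card):

    from aqt import mw

    html = mw.prepare_card_text_for_display(card.q())
    # images are escaped, then each [anki:play:q:N] ref is either turned
    # into a clickable icon or stripped, per the showPlayButtons setting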

View file

@@ -13,20 +13,13 @@ from typing import List, Optional
 from anki import hooks
 from anki.cards import Card
 from anki.lang import _, ngettext
-from anki.sound import AVTag
 from anki.utils import stripHTML
 from aqt import AnkiQt, gui_hooks
 from aqt.qt import *
-from aqt.sound import av_player, getAudio
+from aqt.sound import av_player, getAudio, play_clicked_audio
 from aqt.theme import theme_manager
 from aqt.toolbar import BottomBar
-from aqt.utils import (
-    askUserDialog,
-    downArrow,
-    mungeQA,
-    qtMenuShortcutWorkaround,
-    tooltip,
-)
+from aqt.utils import askUserDialog, downArrow, qtMenuShortcutWorkaround, tooltip
 
 
 class Reviewer:
@@ -40,7 +33,6 @@ class Reviewer:
         self.hadCardQueue = False
         self._answeredIds: List[int] = []
         self._recordedAudio = None
-        self._current_side_audio: Optional[List[AVTag]] = None
         self.typeCorrect = None  # web init happens before this is set
         self.state: Optional[str] = None
         self.bottom = BottomBar(mw, mw.bottomWeb)
@@ -116,21 +108,12 @@ class Reviewer:
         state = self.state
         c = self.card
         if state == "question":
-            av_player.play_from_text(self.mw.col, c.q())
+            av_player.play_tags(c.question_av_tags())
         elif state == "answer":
-            txt = ""
+            tags = c.answer_av_tags()
             if self._replayq(c, previewer):
-                txt = c.q()
-            txt += c.a()
-            av_player.play_from_text(self.mw.col, txt)
-
-    def on_play_button(self, idx: int):
-        try:
-            tag = self._current_side_audio[idx]
-        except IndexError:
-            return
-        av_player.play_tags([tag])
+                tags = c.question_av_tags() + tags
+            av_player.play_tags(tags)
 
     # Initializing the webview
     ##########################################################################
@@ -176,7 +159,7 @@ class Reviewer:
     ##########################################################################
 
     def _mungeQA(self, buf):
-        return self.typeAnsFilter(mungeQA(self.mw.col, buf))
+        return self.typeAnsFilter(self.mw.prepare_card_text_for_display(buf))
 
     def _showQuestion(self) -> None:
         self._reps += 1
@@ -192,9 +175,9 @@ The front of this card is empty. Please run Tools>Empty Cards."""
         else:
             q = c.q()
 
-        q, self._current_side_audio = self.mw.process_av_tags(q)
+        # play audio?
         if self.autoplay(c):
-            av_player.play_tags(self._current_side_audio)
+            av_player.play_tags(c.question_av_tags())
 
         # render & update bottom
         q = self._mungeQA(q)
@@ -236,9 +219,9 @@ The front of this card is empty. Please run Tools>Empty Cards."""
         c = self.card
         a = c.a()
         # play audio?
-        a, self._current_side_audio = self.mw.process_av_tags(a)
         if self.autoplay(c):
-            av_player.play_tags(self._current_side_audio)
+            av_player.play_tags(c.answer_av_tags())
 
         a = self._mungeQA(a)
         a = gui_hooks.card_will_show(a, c, "reviewAnswer")
         # render and update bottom
@@ -333,7 +316,7 @@ The front of this card is empty. Please run Tools>Empty Cards."""
         elif url == "more":
             self.showContextMenu()
         elif url.startswith("play:"):
-            self.on_play_button(int(url.split(":")[1]))
+            play_clicked_audio(url, self.card)
         else:
             print("unrecognized anki link:", url)

View file

@@ -16,8 +16,8 @@ from typing import Any, Callable, Dict, List, Optional, Tuple
 import pyaudio
 
-import anki
 import aqt
+from anki.cards import Card
 from anki.lang import _
 from anki.sound import AVTag, SoundOrVideoTag
 from anki.utils import isLin, isMac, isWin
@@ -104,14 +104,6 @@ class AVPlayer:
         self._enqueued.extend(tags)
         self._play_next_if_idle()
 
-    def play_from_text(self, col: anki.storage._Collection, text: str) -> None:
-        tags = col.backend.get_av_tags(text)
-        self.play_tags(tags)
-
-    def extend_from_text(self, col: anki.storage._Collection, text: str) -> None:
-        tags = col.backend.get_av_tags(text)
-        self.extend_and_play(tags)
-
     def stop_and_clear_queue(self) -> None:
         self._enqueued = []
         self._stop_if_playing()
@@ -572,9 +564,7 @@ def play(filename: str) -> None:
 def playFromText(text) -> None:
-    from aqt import mw
-
-    av_player.extend_from_text(mw.col, text)
+    print("playFromText() deprecated")
 
 
 # legacy globals
@@ -590,19 +580,39 @@ for (k, v) in _exports:
 # Tag handling
 ##########################################################################
 
-AV_FLAG_RE = re.compile(r"\[anki:play\](\d+)\[/anki:play]")
+AV_FLAG_RE = re.compile(r"\[anki:(play:.:\d+)\]")
 
 
-def av_flags_to_html(text: str) -> str:
+def strip_av_refs(text: str) -> str:
+    return AV_FLAG_RE.sub("", text)
+
+
+def av_refs_to_play_icons(text: str) -> str:
+    """Add play icons into the HTML.
+
+    When clicked, the icon will call eg pycmd('play:q:1').
+    """
+
     def repl(match: re.Match) -> str:
         return f"""
-<a class=soundLink href=# onclick="pycmd('play:{match.group(1)}'); return false;">
+<a class=soundLink href=# onclick="pycmd('{match.group(1)}'); return false;">
   <img class=playImage src='/_anki/imgs/play.png'>
 </a>"""
 
     return AV_FLAG_RE.sub(repl, text)
 
+
+def play_clicked_audio(pycmd: str, card: Card) -> None:
+    """eg. if pycmd is 'play:q:0', play the first audio on the question side."""
+    play, context, str_idx = pycmd.split(":")
+    idx = int(str_idx)
+    if context == "q":
+        tags = card.question_av_tags()
+    else:
+        tags = card.answer_av_tags()
+    av_player.play_tags([tags[idx]])
+
 
 # Init defaults
 ##########################################################################
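End to end, a ref like [anki:play:q:0] becomes an icon whose pycmd payload round-trips back into the cached tag list. A hedged sketch (assumes a rendered card):

    import aqt.sound

    html = aqt.sound.av_refs_to_play_icons("hi[anki:play:q:0]")
    # html now contains <a ... onclick="pycmd('play:q:0'); ...">
    # when the webview delivers that pycmd, the reviewer hands it off:
    aqt.sound.play_clicked_audio("play:q:0", card)  # plays question_av_tags()[0]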

View file

@@ -480,8 +480,8 @@ def restoreHeader(widget, key):
 def mungeQA(col, txt):
+    print("mungeQA() deprecated; use mw.prepare_card_text_for_display()")
     txt = col.media.escapeImages(txt)
-    txt = col.backend.strip_av_tags(txt)
     return txt

View file

@@ -10,7 +10,7 @@ use crate::template::{
     render_card, without_legacy_template_directives, FieldMap, FieldRequirements, ParsedTemplate,
     RenderedNode,
 };
-use crate::text::{av_tags_in_string, flag_av_tags, strip_av_tags, AVTag};
+use crate::text::{extract_av_tags, strip_av_tags, AVTag};
 use prost::Message;
 use std::collections::{HashMap, HashSet};
 use std::path::PathBuf;
@@ -100,8 +100,7 @@ impl Backend {
                 OValue::LocalMinutesWest(local_minutes_west_for_stamp(stamp))
             }
             Value::StripAvTags(text) => OValue::StripAvTags(strip_av_tags(&text).into()),
-            Value::GetAvTags(text) => OValue::GetAvTags(self.get_av_tags(&text)),
-            Value::FlagAvTags(text) => OValue::FlagAvTags(flag_av_tags(&text).into()),
+            Value::ExtractAvTags(input) => OValue::ExtractAvTags(self.extract_av_tags(input)),
         })
     }
@@ -183,11 +182,13 @@ impl Backend {
         })
     }
 
-    fn get_av_tags(&self, text: &str) -> pt::GetAvTagsOut {
-        let tags = av_tags_in_string(text)
+    fn extract_av_tags(&self, input: pt::ExtractAvTagsIn) -> pt::ExtractAvTagsOut {
+        let (text, tags) = extract_av_tags(&input.text, input.question_side);
+        let pt_tags = tags
+            .into_iter()
             .map(|avtag| match avtag {
                 AVTag::SoundOrVideo(file) => pt::AvTag {
-                    value: Some(pt::av_tag::Value::SoundOrVideo(file.to_string())),
+                    value: Some(pt::av_tag::Value::SoundOrVideo(file)),
                 },
                 AVTag::TextToSpeech {
                     field_text,
@@ -196,16 +197,19 @@ impl Backend {
                     other_args,
                 } => pt::AvTag {
                     value: Some(pt::av_tag::Value::Tts(pt::TtsTag {
-                        field_text: field_text.to_string(),
-                        lang: lang.to_string(),
-                        voices: voices.into_iter().map(ToOwned::to_owned).collect(),
-                        other_args: other_args.into_iter().map(ToOwned::to_owned).collect(),
+                        field_text,
+                        lang,
+                        voices,
+                        other_args,
                     })),
                 },
             })
             .collect();
-        pt::GetAvTagsOut { av_tags: tags }
+        pt::ExtractAvTagsOut {
+            text: text.into(),
+            av_tags: pt_tags,
+        }
     }
 }

View file

@@ -3,7 +3,6 @@
 use crate::err::{Result, TemplateError};
 use crate::template_filters::apply_filters;
-use crate::text::strip_av_tags;
 use lazy_static::lazy_static;
 use nom;
 use nom::branch::alt;
@@ -269,7 +268,6 @@ pub(crate) struct RenderContext<'a> {
     pub nonempty_fields: &'a HashSet<&'a str>,
     pub question_side: bool,
     pub card_ord: u16,
-    pub front_text: Option<Cow<'a, str>>,
 }
 
 impl ParsedTemplate<'_> {
@@ -302,19 +300,13 @@ fn render_into(
                 key: key @ "FrontSide",
                 ..
             } => {
-                if let Some(front_side) = &context.front_text {
-                    // a fully rendered front side is available, so we can
-                    // bake it into the output
-                    append_str_to_nodes(rendered_nodes, front_side.as_ref());
-                } else {
-                    // the front side contains unknown filters, and must
-                    // be completed by the Python code
-                    rendered_nodes.push(RenderedNode::Replacement {
-                        field_name: (*key).to_string(),
-                        filters: vec![],
-                        current_text: "".into(),
-                    });
-                }
+                // defer FrontSide rendering to Python, as extra
+                // filters may be required
+                rendered_nodes.push(RenderedNode::Replacement {
+                    field_name: (*key).to_string(),
+                    filters: vec![],
+                    current_text: "".into(),
+                });
             }
             Replacement { key: "", filters } if !filters.is_empty() => {
                 // if a filter is provided, we accept an empty field name to
@@ -435,19 +427,12 @@ pub fn render_card(
         nonempty_fields: &nonempty_fields(field_map),
         question_side: true,
         card_ord,
-        front_text: None,
     };
 
     // question side
     let qnorm = without_legacy_template_directives(qfmt);
     let qnodes = ParsedTemplate::from_text(qnorm.as_ref())?.render(&context)?;
 
-    // if the question side didn't have any unknown filters, we can pass
-    // FrontSide in now
-    if let [RenderedNode::Text { ref text }] = *qnodes.as_slice() {
-        context.front_text = Some(strip_av_tags(text));
-    }
-
     // answer side
     context.question_side = false;
     let anorm = without_legacy_template_directives(afmt);
@@ -517,10 +502,9 @@ mod test {
     use super::{FieldMap, ParsedNode::*, ParsedTemplate as PT};
     use crate::err::TemplateError;
     use crate::template::{
-        field_is_empty, nonempty_fields, render_card, without_legacy_template_directives,
-        FieldRequirements, RenderContext, RenderedNode,
+        field_is_empty, nonempty_fields, without_legacy_template_directives, FieldRequirements,
+        RenderContext,
     };
-    use crate::text::strip_html;
     use std::collections::{HashMap, HashSet};
     use std::iter::FromIterator;
@@ -683,7 +667,6 @@ mod test {
             nonempty_fields: &nonempty_fields(&map),
             question_side: true,
             card_ord: 1,
-            front_text: None,
         };
 
         use crate::template::RenderedNode as FN;
@@ -780,39 +763,4 @@ mod test {
             }]
         );
     }
-
-    fn get_complete_template(nodes: &Vec<RenderedNode>) -> Option<&str> {
-        if let [RenderedNode::Text { ref text }] = nodes.as_slice() {
-            Some(text.as_str())
-        } else {
-            None
-        }
-    }
-
-    #[test]
-    fn test_render_full() {
-        // make sure front and back side renders cloze differently
-        let fmt = "{{cloze:Text}}";
-        let clozed_text = "{{c1::one}} {{c2::two::hint}}";
-        let map: HashMap<_, _> = vec![("Text", clozed_text)].into_iter().collect();
-        let (qnodes, anodes) = render_card(fmt, fmt, &map, 0).unwrap();
-        assert_eq!(
-            strip_html(get_complete_template(&qnodes).unwrap()),
-            "[...] two"
-        );
-        assert_eq!(
-            strip_html(get_complete_template(&anodes).unwrap()),
-            "one two"
-        );
-
-        // FrontSide should render if only standard modifiers were used
-        let (_qnodes, anodes) =
-            render_card("{{kana:text:Text}}", "{{FrontSide}}", &map, 1).unwrap();
-        assert_eq!(get_complete_template(&anodes).unwrap(), clozed_text);
-
-        // But if a custom modifier was used, it's deferred to the Python code
-        let (_qnodes, anodes) = render_card("{{custom:Text}}", "{{FrontSide}}", &map, 1).unwrap();
-        assert_eq!(get_complete_template(&anodes).is_none(), true)
-    }
 }

View file

@@ -341,7 +341,6 @@ field</a>
             nonempty_fields: &Default::default(),
             question_side: false,
             card_ord: 0,
-            front_text: None,
         };
         assert_eq!(
             apply_filters("ignored", &["cloze", "type"], "Text", &ctx),
@@ -357,7 +356,6 @@ field</a>
             nonempty_fields: &Default::default(),
             question_side: true,
             card_ord: 0,
-            front_text: None,
         };
         assert_eq!(strip_html(&cloze_filter(text, &ctx)).as_ref(), "[...] two");
         assert_eq!(

View file

@@ -9,13 +9,13 @@ use std::collections::HashSet;
 use std::ptr;
 
 #[derive(Debug, PartialEq)]
-pub enum AVTag<'a> {
-    SoundOrVideo(Cow<'a, str>),
+pub enum AVTag {
+    SoundOrVideo(String),
     TextToSpeech {
-        field_text: Cow<'a, str>,
-        lang: &'a str,
-        voices: Vec<&'a str>,
-        other_args: Vec<&'a str>,
+        field_text: String,
+        lang: String,
+        voices: Vec<String>,
+        other_args: Vec<String>,
     },
 }
@@ -78,27 +78,29 @@ pub fn strip_av_tags(text: &str) -> Cow<str> {
     AV_TAGS.replace_all(text, "")
 }
 
-pub fn flag_av_tags(text: &str) -> Cow<str> {
-    let mut idx = 0;
-    AV_TAGS.replace_all(text, |_caps: &Captures| {
-        let text = format!("[anki:play]{}[/anki:play]", idx);
-        idx += 1;
-        text
-    })
-}
-
-pub fn av_tags_in_string(text: &str) -> impl Iterator<Item = AVTag> {
-    AV_TAGS.captures_iter(text).map(|caps| {
-        if let Some(av_file) = caps.get(1) {
-            AVTag::SoundOrVideo(decode_entities(av_file.as_str()))
+/// Extract audio tags from string, replacing them with [anki:play] refs
+pub fn extract_av_tags<'a>(text: &'a str, question_side: bool) -> (Cow<'a, str>, Vec<AVTag>) {
+    let mut tags = vec![];
+    let context = if question_side { 'q' } else { 'a' };
+    let replaced_text = AV_TAGS.replace_all(text, |caps: &Captures| {
+        // extract
+        let tag = if let Some(av_file) = caps.get(1) {
+            AVTag::SoundOrVideo(decode_entities(av_file.as_str()).into())
         } else {
             let args = caps.get(2).unwrap();
             let field_text = caps.get(3).unwrap();
             tts_tag_from_string(field_text.as_str(), args.as_str())
-        }
-    })
+        };
+        tags.push(tag);
+        // and replace with reference
+        format!("[anki:play:{}:{}]", context, tags.len() - 1)
+    });
+
+    (replaced_text, tags)
 }
 
-fn tts_tag_from_string<'a>(field_text: &'a str, args: &'a str) -> AVTag<'a> {
+fn tts_tag_from_string<'a>(field_text: &'a str, args: &'a str) -> AVTag {
     let mut other_args = vec![];
     let mut split_args = args.split(' ');
     let lang = split_args.next().unwrap_or("");
@@ -109,15 +111,15 @@ fn tts_tag_from_string<'a>(field_text: &'a str, args: &'a str) -> AVTag {
             voices = remaining_arg
                 .split('=')
                 .nth(1)
-                .map(|voices| voices.split(',').collect());
+                .map(|voices| voices.split(',').map(ToOwned::to_owned).collect());
         } else {
-            other_args.push(remaining_arg);
+            other_args.push(remaining_arg.to_owned());
        }
     }
 
     AVTag::TextToSpeech {
-        field_text: strip_html_for_tts(field_text),
-        lang,
+        field_text: strip_html_for_tts(field_text).into(),
+        lang: lang.into(),
         voices: voices.unwrap_or_else(Vec::new),
         other_args,
     }
@@ -149,7 +151,7 @@ pub fn cloze_numbers_in_string(html: &str) -> HashSet<u16> {
 #[cfg(test)]
 mod test {
     use crate::text::{
-        av_tags_in_string, cloze_numbers_in_string, flag_av_tags, strip_av_tags, strip_html,
+        cloze_numbers_in_string, extract_av_tags, strip_av_tags, strip_html,
         strip_html_preserving_image_filenames, AVTag,
     };
     use std::collections::HashSet;
@@ -188,22 +190,21 @@ mod test {
         let s =
             "abc[sound:fo&amp;o.mp3]def[anki:tts][en_US voices=Bob,Jane]foo<br>1&gt;2[/anki:tts]gh";
         assert_eq!(strip_av_tags(s), "abcdefgh");
+
+        let (text, tags) = extract_av_tags(s, true);
+        assert_eq!(text, "abc[anki:play:q:0]def[anki:play:q:1]gh");
         assert_eq!(
-            av_tags_in_string(s).collect::<Vec<_>>(),
+            tags,
             vec![
                 AVTag::SoundOrVideo("fo&o.mp3".into()),
                 AVTag::TextToSpeech {
                     field_text: "foo 1>2".into(),
-                    lang: "en_US",
-                    voices: vec!["Bob", "Jane"],
+                    lang: "en_US".into(),
+                    voices: vec!["Bob".into(), "Jane".into()],
                     other_args: vec![]
                 },
             ]
         );
-        assert_eq!(
-            flag_av_tags(s),
-            "abc[anki:play]0[/anki:play]def[anki:play]1[/anki:play]gh"
-        );
     }
 }
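The same behaviour as the Rust test above, exercised through the Python bindings (hedged sketch; assumes an open collection col, and that the native tag dataclasses in pylib/anki/sound.py expose filename and field_text fields):

    s = "abc[sound:fo&amp;o.mp3]def[anki:tts][en_US voices=Bob,Jane]foo<br>1&gt;2[/anki:tts]gh"
    text, tags = col.backend.extract_av_tags(s, question_side=True)
    assert text == "abc[anki:play:q:0]def[anki:play:q:1]gh"
    assert tags[0].filename == "fo&o.mp3"   # HTML entities decoded
    assert tags[1].field_text == "foo 1>2"  # HTML stripped for TTS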