extract and flag AV tags as part of the render process

We can now show replay buttons for the audio contained in {{FrontSide}}
without having to play it again when the answer is shown.

The template code now always defers FrontSide rendering to Python: baking
the rendered front side in on the Rust side wasn't a big saving, and meant
the logic had to be implemented twice.
Damien Elmes 2020-01-24 11:06:11 +10:00
commit 17ebdfc099
parent e3fb184a84
18 changed files with 205 additions and 216 deletions
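In outline: during rendering, each [sound:...] or [anki:tts]...[/anki:tts] tag is now pulled out of the generated HTML and replaced with a positional reference such as [anki:play:q:0] (side, then index), while the parsed tags are kept alongside the rendered text. A minimal Python sketch of just the reference scheme (the real extraction lives in the Rust text module below, and also handles TTS tags and entity decoding):

```python
import re

SOUND_RE = re.compile(r"\[sound:(.+?)\]")  # illustrative subset of the real AV_TAGS regex

def extract_refs(text: str, question_side: bool):
    side = "q" if question_side else "a"
    tags = []

    def repl(m: re.Match) -> str:
        tags.append(m.group(1))
        # each tag becomes a positional [anki:play:<side>:<idx>] reference
        return f"[anki:play:{side}:{len(tags) - 1}]"

    return SOUND_RE.sub(repl, text), tags

print(extract_refs("hi[sound:a.mp3] there[sound:b.mp3]", True))
# ('hi[anki:play:q:0] there[anki:play:q:1]', ['a.mp3', 'b.mp3'])
```

Because the question side is extracted before it is injected into {{FrontSide}}, the answer side only yields its own tags, which is what makes replay-without-replaying possible.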

View file

@@ -17,8 +17,7 @@ message BackendInput {
         RenderCardIn render_card = 21;
         int64 local_minutes_west = 22;
         string strip_av_tags = 23;
-        string get_av_tags = 24;
-        string flag_av_tags = 25;
+        ExtractAVTagsIn extract_av_tags = 24;
     }
 }
@@ -32,8 +31,7 @@ message BackendOutput {
         RenderCardOut render_card = 21;
         sint32 local_minutes_west = 22;
         string strip_av_tags = 23;
-        GetAVTagsOut get_av_tags = 24;
-        string flag_av_tags = 25;
+        ExtractAVTagsOut extract_av_tags = 24;
         BackendError error = 2047;
 }
@@ -149,8 +147,14 @@ message RenderedTemplateReplacement {
     repeated string filters = 3;
 }
 
-message GetAVTagsOut {
-    repeated AVTag av_tags = 1;
+message ExtractAVTagsIn {
+    string text = 1;
+    bool question_side = 2;
+}
+
+message ExtractAVTagsOut {
+    string text = 1;
+    repeated AVTag av_tags = 2;
 }
 
 message AVTag {
View file

@@ -5,12 +5,13 @@ from __future__ import annotations
 
 import pprint
 import time
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 import anki  # pylint: disable=unused-import
 from anki import hooks
 from anki.consts import *
 from anki.notes import Note
+from anki.sound import AVTag
 from anki.utils import intTime, joinFields, timestampID
 
 # Cards
@@ -26,7 +27,7 @@ from anki.utils import intTime, joinFields, timestampID
 
 class Card:
-    _qa: Optional[Dict[str, Union[str, int]]]
+    _qa: Optional[Dict[str, Union[str, int, List[AVTag]]]]
     _note: Optional[Note]
     timerStarted: Optional[float]
     lastIvl: int
@@ -149,6 +150,12 @@ lapses=?, left=?, odue=?, odid=?, did=? where id = ?""",
     def a(self) -> str:
         return self.css() + self._getQA()["a"]
 
+    def question_av_tags(self) -> List[AVTag]:
+        return self._qa["q_av_tags"]  # type: ignore
+
+    def answer_av_tags(self) -> List[AVTag]:
+        return self._qa["a_av_tags"]  # type: ignore
+
     def css(self) -> str:
         return "<style>%s</style>" % self.model()["css"]

View file

@@ -30,8 +30,9 @@ from anki.notes import Note
 from anki.rsbackend import RustBackend
 from anki.sched import Scheduler as V1Scheduler
 from anki.schedv2 import Scheduler as V2Scheduler
+from anki.sound import AVTag
 from anki.tags import TagManager
-from anki.template import QAData, TemplateRenderContext, render_card
+from anki.template import QAData, RenderOutput, TemplateRenderContext, render_card
 from anki.utils import (
     devMode,
     fieldChecksum,
@@ -633,7 +634,7 @@ where c.nid = n.id and c.id in %s group by nid"""
     # data is [cid, nid, mid, did, ord, tags, flds, cardFlags]
     def _renderQA(
         self, data: QAData, qfmt: Optional[str] = None, afmt: Optional[str] = None
-    ) -> Dict[str, Union[str, int]]:
+    ) -> Dict[str, Union[str, int, List[AVTag]]]:
         # extract info from data
         split_fields = splitFields(data[6])
         card_ord = data[4]
@@ -671,31 +672,44 @@ where c.nid = n.id and c.id in %s group by nid"""
         # render fields. if any custom filters are encountered,
         # the field_filter hook will be called.
         try:
-            qtext, atext = render_card(self, qfmt, afmt, ctx)
+            output = render_card(self, qfmt, afmt, ctx)
         except anki.rsbackend.BackendException as e:
             errmsg = _("Card template has a problem:") + f"<br>{e}"
-            qtext = errmsg
-            atext = errmsg
+            output = RenderOutput(
+                question_text=errmsg,
+                answer_text=errmsg,
+                question_av_tags=[],
+                answer_av_tags=[],
+            )
 
         # avoid showing the user a confusing blank card if they've
         # forgotten to add a cloze deletion
         if model["type"] == MODEL_CLOZE:
             if not self.models._availClozeOrds(model, data[6], False):
-                qtext = (
-                    qtext
-                    + "<p>"
-                    + _("Please edit this note and add some cloze deletions. (%s)")
-                    % ("<a href=%s#cloze>%s</a>" % (HELP_SITE, _("help")))
-                )
+                output.question_text += "<p>" + _(
+                    "Please edit this note and add some cloze deletions. (%s)"
+                ) % ("<a href=%s#cloze>%s</a>" % (HELP_SITE, _("help")))
 
         # allow add-ons to modify the generated result
-        (qtext, atext) = hooks.card_did_render((qtext, atext), ctx)
+        (output.question_text, output.answer_text) = hooks.card_did_render(
+            (output.question_text, output.answer_text), ctx
+        )
 
         # legacy hook
-        qtext = runFilter("mungeQA", qtext, "q", fields, model, data, self)
-        atext = runFilter("mungeQA", atext, "a", fields, model, data, self)
+        output.question_text = runFilter(
+            "mungeQA", output.question_text, "q", fields, model, data, self
+        )
+        output.answer_text = runFilter(
+            "mungeQA", output.answer_text, "a", fields, model, data, self
+        )
 
-        return dict(q=qtext, a=atext, id=card_id)
+        return dict(
+            q=output.question_text,
+            a=output.answer_text,
+            id=card_id,
+            q_av_tags=output.question_av_tags,
+            a_av_tags=output.answer_av_tags,
+        )
 
     def _qaData(self, where="") -> Any:
         "Return [cid, nid, mid, did, ord, tags, flds, cardFlags] db query"

View file

@@ -161,15 +161,16 @@ class RustBackend:
     def strip_av_tags(self, text: str) -> str:
         return self._run_command(pb.BackendInput(strip_av_tags=text)).strip_av_tags
 
-    def get_av_tags(self, text: str) -> List[AVTag]:
-        return list(
-            map(
-                av_tag_to_native,
-                self._run_command(
-                    pb.BackendInput(get_av_tags=text)
-                ).get_av_tags.av_tags,
+    def extract_av_tags(
+        self, text: str, question_side: bool
+    ) -> Tuple[str, List[AVTag]]:
+        out = self._run_command(
+            pb.BackendInput(
+                extract_av_tags=pb.ExtractAVTagsIn(
+                    text=text, question_side=question_side
+                )
             )
-        )
-
-    def flag_av_tags(self, text: str) -> str:
-        return self._run_command(pb.BackendInput(flag_av_tags=text)).flag_av_tags
+        ).extract_av_tags
+        native_tags = list(map(av_tag_to_native, out.av_tags))
+
+        return out.text, native_tags
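A sketch of the new round trip, with the expected values taken from the Rust test at the end of this commit (`col` is assumed to be an open collection):

```python
# one backend call now returns the rewritten text plus the parsed tags
text, tags = col.backend.extract_av_tags("abc[sound:fo&amp;o.mp3]def", True)
assert text == "abc[anki:play:q:0]def"
assert tags[0].filename == "fo&o.mp3"  # entities decoded during extraction
```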

View file

@@ -4,8 +4,7 @@
 """
 Sound/TTS references extracted from card text.
 
-Use collection.backend.strip_av_tags(string) to remove all tags,
-and collection.backend.get_av_tags(string) to get a list of AVTags.
+These can be accessed via eg card.question_av_tags()
 """
 
 from __future__ import annotations

View file

@@ -29,12 +29,14 @@ template_legacy.py file, using the legacy addHook() system.
 
 from __future__ import annotations
 
 import re
+from dataclasses import dataclass
 from typing import Any, Dict, List, Optional, Tuple
 
 import anki
 from anki import hooks
 from anki.models import NoteType
 from anki.rsbackend import TemplateReplacementList
+from anki.sound import AVTag
 
 QAData = Tuple[
     # Card ID this QA comes from. Corresponds to 'cid' column.
@@ -122,18 +124,34 @@ class TemplateRenderContext:
         return self._note_type
 
 
+@dataclass
+class RenderOutput:
+    question_text: str
+    answer_text: str
+    question_av_tags: List[AVTag]
+    answer_av_tags: List[AVTag]
+
+
 def render_card(
     col: anki.storage._Collection, qfmt: str, afmt: str, ctx: TemplateRenderContext
-) -> Tuple[str, str]:
-    """Renders the provided templates, returning rendered q & a text.
+) -> RenderOutput:
+    """Renders the provided templates, returning rendered output.
 
     Will raise if the template is invalid."""
     (qnodes, anodes) = col.backend.render_card(qfmt, afmt, ctx.fields(), ctx.card_ord())
 
     qtext = apply_custom_filters(qnodes, ctx, front_side=None)
-    atext = apply_custom_filters(anodes, ctx, front_side=qtext)
+    qtext, q_avtags = col.backend.extract_av_tags(qtext, True)
 
-    return qtext, atext
+    atext = apply_custom_filters(anodes, ctx, front_side=qtext)
+    atext, a_avtags = col.backend.extract_av_tags(atext, False)
+
+    return RenderOutput(
+        question_text=qtext,
+        answer_text=atext,
+        question_av_tags=q_avtags,
+        answer_av_tags=a_avtags,
+    )
 
 
 def apply_custom_filters(
@@ -153,7 +171,7 @@ def apply_custom_filters(
         else:
             # do we need to inject in FrontSide?
             if node.field_name == "FrontSide" and front_side is not None:
-                node.current_text = ctx.col().backend.strip_av_tags(front_side)
+                node.current_text = front_side
 
             field_text = node.current_text
             for filter_name in node.filters:
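render_card() now returns a RenderOutput rather than a (q, a) tuple, and because qtext already has its AV tags swapped for [anki:play:q:N] refs before being injected as front_side, the answer side's extract_av_tags() call only sees tags that belong to the answer. A sketch of consuming the new return value (all names taken from this diff):

```python
output = render_card(col, qfmt, afmt, ctx)
html_q = output.question_text                 # holds [anki:play:q:N] refs
html_a = output.answer_text                   # holds [anki:play:a:N] refs only
all_audio = output.question_av_tags + output.answer_av_tags
```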

View file

@@ -142,7 +142,7 @@ def test_furigana():
     # and should avoid sound
     n["Front"] = "foo[sound:abc.mp3]"
     n.flush()
-    assert "sound:" in c.q(reload=True)
+    assert "anki:play" in c.q(reload=True)
     # it shouldn't throw an error while people are editing
     m["tmpls"][0]["qfmt"] = "{{kana:}}"
     mm.save(m)

View file

@@ -32,7 +32,6 @@ from aqt.utils import (
     askUser,
     getOnlyText,
     getTag,
-    mungeQA,
     openHelp,
     qtMenuShortcutWorkaround,
     restoreGeom,
@@ -1682,9 +1681,9 @@ where id in %s"""
             # need to force reload even if answer
             txt = c.q(reload=True)
 
-        questionAudio = []
+        question_audio = []
         if self._previewBothSides:
-            questionAudio = self.mw.col.backend.get_av_tags(txt)
+            question_audio = c.question_av_tags()
         if self._previewState == "answer":
             func = "_showAnswer"
             txt = c.a()
@@ -1694,13 +1693,13 @@ where id in %s"""
             if self.mw.reviewer.autoplay(c):
                 # if we're showing both sides at once, play question audio first
-                av_player.play_tags(questionAudio)
+                av_player.play_tags(question_audio)
                 # then play any audio that hasn't already been played
-                answer_audio = self.mw.col.backend.get_av_tags(txt)
-                unplayed_audio = [x for x in answer_audio if x not in questionAudio]
+                answer_audio = c.answer_av_tags()
+                unplayed_audio = [x for x in answer_audio if x not in question_audio]
                 av_player.extend_and_play(unplayed_audio)
 
-        txt = mungeQA(self.col, txt)
+        txt = self.mw.prepare_card_text_for_display(txt)
         gui_hooks.card_will_show(
             txt, c, "preview" + self._previewState.capitalize()
         )

View file

@@ -5,8 +5,10 @@
 
 import collections
 import json
 import re
+from typing import Optional
 
 import aqt
+from anki.cards import Card
 from anki.consts import *
 from anki.lang import _, ngettext
 from anki.utils import isMac, isWin, joinFields
@@ -18,7 +20,6 @@ from aqt.utils import (
     askUser,
     downArrow,
     getOnlyText,
-    mungeQA,
     openHelp,
     restoreGeom,
     saveGeom,
@@ -29,6 +30,8 @@ from aqt.webview import AnkiWebView
 
 class CardLayout(QDialog):
+    card: Optional[Card]
+
     def __init__(self, mw, note, ord=0, parent=None, addMode=False):
         QDialog.__init__(self, parent or mw, Qt.Window)
         mw.setupDialogGC(self)
@@ -339,10 +342,10 @@ Please create a new card type first."""
         bodyclass = theme_manager.body_classes_for_card_ord(c.ord)
 
-        q = ti(mungeQA(self.mw.col, c.q(reload=True)))
+        q = ti(self.mw.prepare_card_text_for_display(c.q(reload=True)))
         q = gui_hooks.card_will_show(q, c, "clayoutQuestion")
 
-        a = ti(mungeQA(self.mw.col, c.a()), type="a")
+        a = ti(self.mw.prepare_card_text_for_display(c.a()), type="a")
         a = gui_hooks.card_will_show(a, c, "clayoutAnswer")
 
         # use _showAnswer to avoid the longer delay
@@ -350,7 +353,7 @@ Please create a new card type first."""
         self.pform.backWeb.eval("_showAnswer(%s, '%s');" % (json.dumps(a), bodyclass))
 
         if c.id not in self.playedAudio:
-            av_player.play_from_text(self.mw.col, c.q() + c.a())
+            av_player.play_tags(c.question_av_tags() + c.answer_av_tags())
             self.playedAudio[c.id] = True
 
         self.updateCardNames()

View file

@@ -24,11 +24,8 @@ def bodyClass(col, card) -> str:
 
 def allSounds(text) -> List:
     print("allSounds() deprecated")
-    return [
-        x.filename
-        for x in aqt.mw.col.backend.get_av_tags(text)
-        if isinstance(x, SoundOrVideoTag)
-    ]
+    text, tags = aqt.mw.col.backend.extract_av_tags(text, True)
+    return [x.filename for x in tags if isinstance(x, SoundOrVideoTag)]
 
 
 def stripSounds(text) -> str:

View file

@@ -400,14 +400,17 @@ close the profile or restart Anki."""
     def setupSound(self) -> None:
         aqt.sound.setup_audio(self.taskman, self.pm.base)
 
-    def process_av_tags(self, text: str) -> Tuple[str, List[AVTag]]:
-        "Return card text with play buttons added, and the extracted AV tags."
-        tags = self.col.backend.get_av_tags(text)
+    def _add_play_buttons(self, text: str) -> str:
+        "Return card text with play buttons added, or stripped."
         if self.pm.profile.get("showPlayButtons", True):
-            text = aqt.sound.av_flags_to_html(self.col.backend.flag_av_tags(text))
+            return aqt.sound.av_refs_to_play_icons(text)
         else:
-            text = self.col.backend.strip_av_tags(text)
-        return (text, tags)
+            return aqt.sound.strip_av_refs(text)
+
+    def prepare_card_text_for_display(self, text: str) -> str:
+        text = self.col.media.escapeImages(text)
+        text = self._add_play_buttons(text)
+        return text
 
     # Collection load/unload
     ##########################################################################
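prepare_card_text_for_display() takes over from aqt.utils.mungeQA(): escape image paths, then either turn the [anki:play:...] refs into icons or strip them, depending on the profile's showPlayButtons option. A sketch of a caller, assuming `mw` and `card` are in scope:

```python
# what GUI code now calls instead of mungeQA(col, txt)
html = mw.prepare_card_text_for_display(card.q())
# with showPlayButtons on, each [anki:play:q:N] ref becomes a clickable
# icon that sends pycmd('play:q:N') back to Python; otherwise it is removed
```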

View file

@@ -13,20 +13,13 @@ from typing import List, Optional
 
 from anki import hooks
 from anki.cards import Card
 from anki.lang import _, ngettext
-from anki.sound import AVTag
 from anki.utils import stripHTML
 from aqt import AnkiQt, gui_hooks
 from aqt.qt import *
-from aqt.sound import av_player, getAudio
+from aqt.sound import av_player, getAudio, play_clicked_audio
 from aqt.theme import theme_manager
 from aqt.toolbar import BottomBar
-from aqt.utils import (
-    askUserDialog,
-    downArrow,
-    mungeQA,
-    qtMenuShortcutWorkaround,
-    tooltip,
-)
+from aqt.utils import askUserDialog, downArrow, qtMenuShortcutWorkaround, tooltip
 
 
 class Reviewer:
@@ -40,7 +33,6 @@ class Reviewer:
         self.hadCardQueue = False
         self._answeredIds: List[int] = []
         self._recordedAudio = None
-        self._current_side_audio: Optional[List[AVTag]] = None
         self.typeCorrect = None  # web init happens before this is set
         self.state: Optional[str] = None
         self.bottom = BottomBar(mw, mw.bottomWeb)
@@ -116,21 +108,12 @@
         state = self.state
         c = self.card
         if state == "question":
-            av_player.play_from_text(self.mw.col, c.q())
+            av_player.play_tags(c.question_av_tags())
         elif state == "answer":
-            txt = ""
+            tags = c.answer_av_tags()
             if self._replayq(c, previewer):
-                txt = c.q()
-            txt += c.a()
-            av_player.play_from_text(self.mw.col, txt)
-
-    def on_play_button(self, idx: int):
-        try:
-            tag = self._current_side_audio[idx]
-        except IndexError:
-            return
-        av_player.play_tags([tag])
+                tags = c.question_av_tags() + tags
+            av_player.play_tags(tags)
 
     # Initializing the webview
     ##########################################################################
@@ -176,7 +159,7 @@
     ##########################################################################
 
     def _mungeQA(self, buf):
-        return self.typeAnsFilter(mungeQA(self.mw.col, buf))
+        return self.typeAnsFilter(self.mw.prepare_card_text_for_display(buf))
 
     def _showQuestion(self) -> None:
         self._reps += 1
@@ -192,9 +175,9 @@ The front of this card is empty. Please run Tools>Empty Cards."""
         else:
             q = c.q()
 
-        q, self._current_side_audio = self.mw.process_av_tags(q)
         # play audio?
         if self.autoplay(c):
-            av_player.play_tags(self._current_side_audio)
+            av_player.play_tags(c.question_av_tags())
 
         # render & update bottom
         q = self._mungeQA(q)
@@ -236,9 +219,9 @@ The front of this card is empty. Please run Tools>Empty Cards."""
         c = self.card
         a = c.a()
         # play audio?
-        a, self._current_side_audio = self.mw.process_av_tags(a)
         if self.autoplay(c):
-            av_player.play_tags(self._current_side_audio)
+            av_player.play_tags(c.answer_av_tags())
 
         a = self._mungeQA(a)
         a = gui_hooks.card_will_show(a, c, "reviewAnswer")
         # render and update bottom
@@ -333,7 +316,7 @@ The front of this card is empty. Please run Tools>Empty Cards."""
         elif url == "more":
             self.showContextMenu()
         elif url.startswith("play:"):
-            self.on_play_button(int(url.split(":")[1]))
+            play_clicked_audio(url, self.card)
         else:
             print("unrecognized anki link:", url)

View file

@@ -16,8 +16,8 @@ from typing import Any, Callable, Dict, List, Optional, Tuple
 
 import pyaudio
 
-import anki
 import aqt
+from anki.cards import Card
 from anki.lang import _
 from anki.sound import AVTag, SoundOrVideoTag
 from anki.utils import isLin, isMac, isWin
@@ -104,14 +104,6 @@ class AVPlayer:
         self._enqueued.extend(tags)
         self._play_next_if_idle()
 
-    def play_from_text(self, col: anki.storage._Collection, text: str) -> None:
-        tags = col.backend.get_av_tags(text)
-        self.play_tags(tags)
-
-    def extend_from_text(self, col: anki.storage._Collection, text: str) -> None:
-        tags = col.backend.get_av_tags(text)
-        self.extend_and_play(tags)
-
     def stop_and_clear_queue(self) -> None:
         self._enqueued = []
         self._stop_if_playing()
@@ -572,9 +564,7 @@ def play(filename: str) -> None:
 
 def playFromText(text) -> None:
-    from aqt import mw
-
-    av_player.extend_from_text(mw.col, text)
+    print("playFromText() deprecated")
 
 
 # legacy globals
@@ -590,19 +580,39 @@ for (k, v) in _exports:
 # Tag handling
 ##########################################################################
 
-AV_FLAG_RE = re.compile(r"\[anki:play\](\d+)\[/anki:play]")
+AV_FLAG_RE = re.compile(r"\[anki:(play:.:\d+)\]")
 
 
-def av_flags_to_html(text: str) -> str:
+def strip_av_refs(text: str) -> str:
+    return AV_FLAG_RE.sub("", text)
+
+
+def av_refs_to_play_icons(text: str) -> str:
+    """Add play icons into the HTML.
+
+    When clicked, the icon will call eg pycmd('play:q:1').
+    """
+
     def repl(match: re.Match) -> str:
         return f"""
-<a class=soundLink href=# onclick="pycmd('play:{match.group(1)}'); return false;">
+<a class=soundLink href=# onclick="pycmd('{match.group(1)}'); return false;">
   <img class=playImage src='/_anki/imgs/play.png'>
 </a>"""
 
     return AV_FLAG_RE.sub(repl, text)
+
+
+def play_clicked_audio(pycmd: str, card: Card) -> None:
+    """eg. if pycmd is 'play:q:0', play the first audio on the question side."""
+    play, context, str_idx = pycmd.split(":")
+    idx = int(str_idx)
+    if context == "q":
+        tags = card.question_av_tags()
+    else:
+        tags = card.answer_av_tags()
+    av_player.play_tags([tags[idx]])
 
 
 # Init defaults
 ##########################################################################
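This is the last leg of the round trip: av_refs_to_play_icons() turns each ref into an anchor whose onclick sends the ref's payload back through pycmd, and play_clicked_audio() resolves that payload against the card's cached tags. A sketch (`card` assumed to be a rendered Card):

```python
html = av_refs_to_play_icons("hi[anki:play:q:0]")
# -> 'hi<a class=soundLink href=# onclick="pycmd(\'play:q:0\'); ...'

# the reviewer's link handler later receives the same string back:
play_clicked_audio("play:q:0", card)  # plays card.question_av_tags()[0]
```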

View file

@@ -480,8 +480,8 @@ def restoreHeader(widget, key):
 
 def mungeQA(col, txt):
+    print("mungeQA() deprecated; use mw.prepare_card_text_for_display()")
     txt = col.media.escapeImages(txt)
     txt = col.backend.strip_av_tags(txt)
     return txt

View file

@@ -10,7 +10,7 @@ use crate::template::{
     render_card, without_legacy_template_directives, FieldMap, FieldRequirements, ParsedTemplate,
     RenderedNode,
 };
-use crate::text::{av_tags_in_string, flag_av_tags, strip_av_tags, AVTag};
+use crate::text::{extract_av_tags, strip_av_tags, AVTag};
 use prost::Message;
 use std::collections::{HashMap, HashSet};
 use std::path::PathBuf;
@@ -100,8 +100,7 @@ impl Backend {
                 OValue::LocalMinutesWest(local_minutes_west_for_stamp(stamp))
             }
             Value::StripAvTags(text) => OValue::StripAvTags(strip_av_tags(&text).into()),
-            Value::GetAvTags(text) => OValue::GetAvTags(self.get_av_tags(&text)),
-            Value::FlagAvTags(text) => OValue::FlagAvTags(flag_av_tags(&text).into()),
+            Value::ExtractAvTags(input) => OValue::ExtractAvTags(self.extract_av_tags(input)),
         })
     }
@@ -183,11 +182,13 @@ impl Backend {
         })
     }
 
-    fn get_av_tags(&self, text: &str) -> pt::GetAvTagsOut {
-        let tags = av_tags_in_string(text)
+    fn extract_av_tags(&self, input: pt::ExtractAvTagsIn) -> pt::ExtractAvTagsOut {
+        let (text, tags) = extract_av_tags(&input.text, input.question_side);
+        let pt_tags = tags
             .into_iter()
             .map(|avtag| match avtag {
                 AVTag::SoundOrVideo(file) => pt::AvTag {
-                    value: Some(pt::av_tag::Value::SoundOrVideo(file.to_string())),
+                    value: Some(pt::av_tag::Value::SoundOrVideo(file)),
                 },
                 AVTag::TextToSpeech {
                     field_text,
@@ -196,16 +197,19 @@ impl Backend {
                     other_args,
                 } => pt::AvTag {
                     value: Some(pt::av_tag::Value::Tts(pt::TtsTag {
-                        field_text: field_text.to_string(),
-                        lang: lang.to_string(),
-                        voices: voices.into_iter().map(ToOwned::to_owned).collect(),
-                        other_args: other_args.into_iter().map(ToOwned::to_owned).collect(),
+                        field_text,
+                        lang,
+                        voices,
+                        other_args,
                     })),
                 },
             })
            .collect();
-        pt::GetAvTagsOut { av_tags: tags }
+        pt::ExtractAvTagsOut {
+            text: text.into(),
+            av_tags: pt_tags,
+        }
     }
 }

View file

@@ -3,7 +3,6 @@
 
 use crate::err::{Result, TemplateError};
 use crate::template_filters::apply_filters;
-use crate::text::strip_av_tags;
 use lazy_static::lazy_static;
 use nom;
 use nom::branch::alt;
@@ -269,7 +268,6 @@ pub(crate) struct RenderContext<'a> {
     pub nonempty_fields: &'a HashSet<&'a str>,
     pub question_side: bool,
     pub card_ord: u16,
-    pub front_text: Option<Cow<'a, str>>,
 }
 
 impl ParsedTemplate<'_> {
@@ -302,20 +300,14 @@ fn render_into(
                 key: key @ "FrontSide",
                 ..
             } => {
-                if let Some(front_side) = &context.front_text {
-                    // a fully rendered front side is available, so we can
-                    // bake it into the output
-                    append_str_to_nodes(rendered_nodes, front_side.as_ref());
-                } else {
-                    // the front side contains unknown filters, and must
-                    // be completed by the Python code
+                // defer FrontSide rendering to Python, as extra
+                // filters may be required
                 rendered_nodes.push(RenderedNode::Replacement {
                     field_name: (*key).to_string(),
                     filters: vec![],
                     current_text: "".into(),
                 });
-                }
             }
             Replacement { key: "", filters } if !filters.is_empty() => {
                 // if a filter is provided, we accept an empty field name to
                 // mean 'pass an empty string to the filter, and it will add
@@ -435,19 +427,12 @@ pub fn render_card(
         nonempty_fields: &nonempty_fields(field_map),
         question_side: true,
         card_ord,
-        front_text: None,
     };
 
     // question side
     let qnorm = without_legacy_template_directives(qfmt);
     let qnodes = ParsedTemplate::from_text(qnorm.as_ref())?.render(&context)?;
 
-    // if the question side didn't have any unknown filters, we can pass
-    // FrontSide in now
-    if let [RenderedNode::Text { ref text }] = *qnodes.as_slice() {
-        context.front_text = Some(strip_av_tags(text));
-    }
-
     // answer side
     context.question_side = false;
     let anorm = without_legacy_template_directives(afmt);
@@ -517,10 +502,9 @@ mod test {
     use super::{FieldMap, ParsedNode::*, ParsedTemplate as PT};
     use crate::err::TemplateError;
     use crate::template::{
-        field_is_empty, nonempty_fields, render_card, without_legacy_template_directives,
-        FieldRequirements, RenderContext, RenderedNode,
+        field_is_empty, nonempty_fields, without_legacy_template_directives, FieldRequirements,
+        RenderContext,
     };
-    use crate::text::strip_html;
     use std::collections::{HashMap, HashSet};
     use std::iter::FromIterator;
@@ -683,7 +667,6 @@ mod test {
             nonempty_fields: &nonempty_fields(&map),
             question_side: true,
            card_ord: 1,
-            front_text: None,
         };
 
         use crate::template::RenderedNode as FN;
@@ -780,39 +763,4 @@ mod test {
             }]
         );
     }
-
-    fn get_complete_template(nodes: &Vec<RenderedNode>) -> Option<&str> {
-        if let [RenderedNode::Text { ref text }] = nodes.as_slice() {
-            Some(text.as_str())
-        } else {
-            None
-        }
-    }
-
-    #[test]
-    fn test_render_full() {
-        // make sure front and back side renders cloze differently
-        let fmt = "{{cloze:Text}}";
-        let clozed_text = "{{c1::one}} {{c2::two::hint}}";
-        let map: HashMap<_, _> = vec![("Text", clozed_text)].into_iter().collect();
-
-        let (qnodes, anodes) = render_card(fmt, fmt, &map, 0).unwrap();
-        assert_eq!(
-            strip_html(get_complete_template(&qnodes).unwrap()),
-            "[...] two"
-        );
-        assert_eq!(
-            strip_html(get_complete_template(&anodes).unwrap()),
-            "one two"
-        );
-
-        // FrontSide should render if only standard modifiers were used
-        let (_qnodes, anodes) =
-            render_card("{{kana:text:Text}}", "{{FrontSide}}", &map, 1).unwrap();
-        assert_eq!(get_complete_template(&anodes).unwrap(), clozed_text);
-
-        // But if a custom modifier was used, it's deferred to the Python code
-        let (_qnodes, anodes) = render_card("{{custom:Text}}", "{{FrontSide}}", &map, 1).unwrap();
-        assert_eq!(get_complete_template(&anodes).is_none(), true)
-    }
 }

View file

@@ -341,7 +341,6 @@ field</a>
             nonempty_fields: &Default::default(),
             question_side: false,
             card_ord: 0,
-            front_text: None,
         };
 
         assert_eq!(
             apply_filters("ignored", &["cloze", "type"], "Text", &ctx),
@@ -357,7 +356,6 @@
             nonempty_fields: &Default::default(),
             question_side: true,
             card_ord: 0,
-            front_text: None,
         };
 
         assert_eq!(strip_html(&cloze_filter(text, &ctx)).as_ref(), "[...] two");
         assert_eq!(

View file

@@ -9,13 +9,13 @@ use std::collections::HashSet;
 use std::ptr;
 
 #[derive(Debug, PartialEq)]
-pub enum AVTag<'a> {
-    SoundOrVideo(Cow<'a, str>),
+pub enum AVTag {
+    SoundOrVideo(String),
     TextToSpeech {
-        field_text: Cow<'a, str>,
-        lang: &'a str,
-        voices: Vec<&'a str>,
-        other_args: Vec<&'a str>,
+        field_text: String,
+        lang: String,
+        voices: Vec<String>,
+        other_args: Vec<String>,
     },
 }
@@ -78,27 +78,29 @@ pub fn strip_av_tags(text: &str) -> Cow<str> {
     AV_TAGS.replace_all(text, "")
 }
 
-pub fn flag_av_tags(text: &str) -> Cow<str> {
-    let mut idx = 0;
-    AV_TAGS.replace_all(text, |_caps: &Captures| {
-        let text = format!("[anki:play]{}[/anki:play]", idx);
-        idx += 1;
-        text
-    })
-}
-
-pub fn av_tags_in_string(text: &str) -> impl Iterator<Item = AVTag> {
-    AV_TAGS.captures_iter(text).map(|caps| {
-        if let Some(av_file) = caps.get(1) {
-            AVTag::SoundOrVideo(decode_entities(av_file.as_str()))
+/// Extract audio tags from string, replacing them with [anki:play] refs
+pub fn extract_av_tags<'a>(text: &'a str, question_side: bool) -> (Cow<'a, str>, Vec<AVTag>) {
+    let mut tags = vec![];
+    let context = if question_side { 'q' } else { 'a' };
+    let replaced_text = AV_TAGS.replace_all(text, |caps: &Captures| {
+        // extract
+        let tag = if let Some(av_file) = caps.get(1) {
+            AVTag::SoundOrVideo(decode_entities(av_file.as_str()).into())
         } else {
             let args = caps.get(2).unwrap();
             let field_text = caps.get(3).unwrap();
             tts_tag_from_string(field_text.as_str(), args.as_str())
-        }
-    })
+        };
+        tags.push(tag);
+
+        // and replace with reference
+        format!("[anki:play:{}:{}]", context, tags.len() - 1)
+    });
+
+    (replaced_text, tags)
 }
 
-fn tts_tag_from_string<'a>(field_text: &'a str, args: &'a str) -> AVTag<'a> {
+fn tts_tag_from_string<'a>(field_text: &'a str, args: &'a str) -> AVTag {
     let mut other_args = vec![];
     let mut split_args = args.split(' ');
     let lang = split_args.next().unwrap_or("");
@@ -109,15 +111,15 @@ fn tts_tag_from_string<'a>(field_text: &'a str, args: &'a str) -> AVTag<'a> {
             voices = remaining_arg
                 .split('=')
                 .nth(1)
-                .map(|voices| voices.split(',').collect());
+                .map(|voices| voices.split(',').map(ToOwned::to_owned).collect());
         } else {
-            other_args.push(remaining_arg);
+            other_args.push(remaining_arg.to_owned());
         }
     }
 
     AVTag::TextToSpeech {
-        field_text: strip_html_for_tts(field_text),
-        lang,
+        field_text: strip_html_for_tts(field_text).into(),
+        lang: lang.into(),
         voices: voices.unwrap_or_else(Vec::new),
         other_args,
     }
@@ -149,7 +151,7 @@ pub fn cloze_numbers_in_string(html: &str) -> HashSet<u16> {
 #[cfg(test)]
 mod test {
     use crate::text::{
-        av_tags_in_string, cloze_numbers_in_string, flag_av_tags, strip_av_tags, strip_html,
+        cloze_numbers_in_string, extract_av_tags, strip_av_tags, strip_html,
         strip_html_preserving_image_filenames, AVTag,
     };
     use std::collections::HashSet;
@@ -188,22 +190,21 @@ mod test {
         let s =
             "abc[sound:fo&amp;o.mp3]def[anki:tts][en_US voices=Bob,Jane]foo<br>1&gt;2[/anki:tts]gh";
         assert_eq!(strip_av_tags(s), "abcdefgh");
+
+        let (text, tags) = extract_av_tags(s, true);
+        assert_eq!(text, "abc[anki:play:q:0]def[anki:play:q:1]gh");
         assert_eq!(
-            av_tags_in_string(s).collect::<Vec<_>>(),
+            tags,
             vec![
                 AVTag::SoundOrVideo("fo&o.mp3".into()),
                 AVTag::TextToSpeech {
                     field_text: "foo 1>2".into(),
-                    lang: "en_US",
-                    voices: vec!["Bob", "Jane"],
+                    lang: "en_US".into(),
+                    voices: vec!["Bob".into(), "Jane".into()],
                     other_args: vec![]
                 },
             ]
         );
-        assert_eq!(
-            flag_av_tags(s),
-            "abc[anki:play]0[/anki:play]def[anki:play]1[/anki:play]gh"
-        );
     }
 }