Mirror of https://github.com/ankitects/anki.git, synced 2025-09-25 01:06:35 -04:00
Replace old extract_av_tags and strip_av_tags with new `card_rendering` mod

commit 2dd0ef09df (parent 939bddd5d6)
5 changed files with 20 additions and 145 deletions
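The hunks below only show the call sites; the new `card_rendering` module itself is added elsewhere in the commit. Judging from how it is called, its surface is roughly the following, so the exact signatures and types are an assumption rather than the module's actual API:

```rust
// Inferred from the call sites in this diff, not copied from the new module;
// pb::AvTag and I18n refer to the crate's existing protobuf/translation types.
pub fn extract_av_tags(text: &str, question_side: bool, tr: &I18n) -> (String, Vec<pb::AvTag>) {
    // rewrites [sound:...] / [anki:tts ...] tags into [anki:play:...] refs
    // and returns the rewritten text together with the extracted tags
    unimplemented!()
}

pub fn strip_av_tags(text: &str) -> String {
    // removes AV tags from the text entirely
    unimplemented!()
}
```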
@@ -5,14 +5,15 @@ use super::Backend;
 pub(super) use crate::backend_proto::cardrendering_service::Service as CardRenderingService;
 use crate::{
     backend_proto as pb,
+    card_rendering::{extract_av_tags, strip_av_tags},
     latex::{extract_latex, extract_latex_expanding_clozes, ExtractedLatex},
     markdown::render_markdown,
     notetype::{CardTemplateSchema11, RenderCardOutput},
     prelude::*,
     template::RenderedNode,
     text::{
-        decode_iri_paths, encode_iri_paths, extract_av_tags, sanitize_html_no_images,
-        strip_av_tags, strip_html, strip_html_preserving_media_filenames, AvTag,
+        decode_iri_paths, encode_iri_paths, sanitize_html_no_images, strip_html,
+        strip_html_preserving_media_filenames,
     },
 };
 
@@ -21,34 +22,10 @@ impl CardRenderingService for Backend {
         &self,
         input: pb::ExtractAvTagsRequest,
     ) -> Result<pb::ExtractAvTagsResponse> {
-        let (text, tags) = extract_av_tags(&input.text, input.question_side);
-        let pt_tags = tags
-            .into_iter()
-            .map(|avtag| match avtag {
-                AvTag::SoundOrVideo(file) => pb::AvTag {
-                    value: Some(pb::av_tag::Value::SoundOrVideo(file)),
-                },
-                AvTag::TextToSpeech {
-                    field_text,
-                    lang,
-                    voices,
-                    other_args,
-                    speed,
-                } => pb::AvTag {
-                    value: Some(pb::av_tag::Value::Tts(pb::TtsTag {
-                        field_text,
-                        lang,
-                        voices,
-                        speed,
-                        other_args,
-                    })),
-                },
-            })
-            .collect();
-
+        let out = extract_av_tags(&input.text, input.question_side, self.i18n());
         Ok(pb::ExtractAvTagsResponse {
-            text: text.into(),
-            av_tags: pt_tags,
+            text: out.0,
+            av_tags: out.1,
         })
     }
 
@@ -140,9 +117,7 @@ impl CardRenderingService for Backend {
     }
 
     fn strip_av_tags(&self, input: pb::String) -> Result<pb::String> {
-        Ok(pb::String {
-            val: strip_av_tags(&input.val).into(),
-        })
+        Ok(strip_av_tags(&input.val).into())
     }
 
     fn render_markdown(&self, input: pb::RenderMarkdownRequest) -> Result<pb::String> {
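The AvTag-to-protobuf mapping deleted above does not disappear; returning pb::AvTag values directly is presumably now the job of card_rendering::extract_av_tags. Restated as a helper for reference (the function name is hypothetical; the match arms are the ones removed above):

```rust
// Hypothetical helper name; the body is the mapping removed from the backend
// handler above, presumed to live inside the card_rendering module now.
fn av_tag_to_proto(tag: AvTag) -> pb::AvTag {
    match tag {
        AvTag::SoundOrVideo(file) => pb::AvTag {
            value: Some(pb::av_tag::Value::SoundOrVideo(file)),
        },
        AvTag::TextToSpeech {
            field_text,
            lang,
            voices,
            other_args,
            speed,
        } => pb::AvTag {
            value: Some(pb::av_tag::Value::Tts(pb::TtsTag {
                field_text,
                lang,
                voices,
                speed,
                other_args,
            })),
        },
    }
}
```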
@@ -9,11 +9,12 @@ use strum::{Display, EnumIter, EnumString, IntoEnumIterator};
 use crate::{
     backend_proto as pb,
     card::{CardQueue, CardType},
+    card_rendering::extract_av_tags,
     notetype::{CardTemplate, NotetypeKind},
     prelude::*,
     scheduler::{timespan::time_span, timing::SchedTimingToday},
     template::RenderedNode,
-    text::{extract_av_tags, html_to_text_line},
+    text::html_to_text_line,
 };
 
 #[derive(Debug, PartialEq, Clone, Copy, Display, EnumIter, EnumString)]
@@ -270,7 +271,7 @@ impl RenderContext {
                 } => current_text,
             })
             .join("");
-        let question = extract_av_tags(&qnodes_text, true).0.to_string();
+        let question = extract_av_tags(&qnodes_text, true, &col.tr).0;
 
         Ok(RenderContext {
             question,
@@ -410,7 +411,7 @@ impl RowContext {
                 } => current_text,
             })
             .join("");
-        let answer = extract_av_tags(&answer, false).0;
+        let answer = extract_av_tags(&answer, false, &self.tr).0;
         html_to_text_line(
             if let Some(stripped) = answer.strip_prefix(&render_context.question) {
                 stripped
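For context on what the `.0` above holds after the change: judging by the `audio()` test removed from the text module at the end of this diff, extraction rewrites each AV tag into an indexed [anki:play] reference for the requested side, roughly like this:

```rust
// Illustrative call only, based on the behaviour the removed test below
// exercised; the filename is made up and `tr` is the collection's I18n.
let (question, tags) = extract_av_tags("abc[sound:foo.mp3]def", true, &tr);
assert_eq!(question, "abc[anki:play:q:0]def");
assert_eq!(tags.len(), 1);
```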
@@ -443,7 +443,6 @@ fn render_into(
                     .as_slice(),
                 key,
                 context,
-                tr,
             ),
             None => {
                 // unknown field encountered
@@ -9,7 +9,6 @@ use regex::{Captures, Regex};
 
 use crate::{
     cloze::{cloze_filter, cloze_only_filter},
-    i18n::I18n,
     template::RenderContext,
     text::strip_html,
 };
@@ -26,7 +25,6 @@ pub(crate) fn apply_filters<'a>(
     filters: &[&str],
     field_name: &str,
     context: &RenderContext,
-    tr: &I18n,
 ) -> (Cow<'a, str>, Vec<String>) {
     let mut text: Cow<str> = text.into();
 
@@ -38,7 +36,7 @@ pub(crate) fn apply_filters<'a>(
     };
 
     for (idx, &filter_name) in filters.iter().enumerate() {
-        match apply_filter(filter_name, text.as_ref(), field_name, context, tr) {
+        match apply_filter(filter_name, text.as_ref(), field_name, context) {
             (true, None) => {
                 // filter did not change text
             }
@@ -69,7 +67,6 @@ fn apply_filter<'a>(
     text: &'a str,
     field_name: &str,
     context: &RenderContext,
-    tr: &I18n,
 ) -> (bool, Option<String>) {
     let output_text = match filter_name {
         "text" => strip_html(text),
@@ -84,8 +81,8 @@ fn apply_filter<'a>(
         // an empty filter name (caused by using two colons) is ignored
         "" => text.into(),
         _ => {
-            if filter_name.starts_with("tts ") {
-                tts_filter(filter_name, text, tr)
+            if let Some(options) = filter_name.strip_prefix("tts ") {
+                tts_filter(options, text).into()
             } else {
                 // unrecognized filter
                 return (false, None);
@@ -194,12 +191,10 @@ return false;">
     .into()
 }
 
-fn tts_filter(filter_name: &str, text: &str, tr: &I18n) -> Cow<'static, str> {
-    let args = filter_name.split_once(' ').map_or("", |t| t.1);
-    let text = text.replace("[...]", &tr.card_templates_blank());
-    format!("[anki:tts][{}]{}[/anki:tts]", args, text).into()
+fn tts_filter(options: &str, text: &str) -> String {
+    format!("[anki:tts lang={}]{}[/anki:tts]", options, text)
 }
 
 // Tests
 //----------------------------------------
 
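The rewritten filter is now a pure wrapper: the caller strips the `tts ` prefix, the remaining arguments are passed through verbatim after `lang=`, and the old `[...]` → blank substitution (the only reason the filter needed `tr`) is gone, presumably handled later by the new card_rendering code, which now receives the translations. A standalone illustration of the new output format:

```rust
// Copy of the new one-line filter for illustration; the first expected string
// matches the updated test further down, the second follows from the format!.
fn tts_filter(options: &str, text: &str) -> String {
    format!("[anki:tts lang={}]{}[/anki:tts]", options, text)
}

fn main() {
    assert_eq!(
        tts_filter("en_US voices=Bob,Jane", "foo"),
        "[anki:tts lang=en_US voices=Bob,Jane]foo[/anki:tts]"
    );
    // "[...]" is no longer rewritten at this stage:
    assert_eq!(
        tts_filter("en_US", "foo [...]"),
        "[anki:tts lang=en_US]foo [...][/anki:tts]"
    );
}
```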
@@ -235,7 +230,6 @@ field</a>
 
     #[test]
     fn typing() {
-        let tr = I18n::template_only();
         assert_eq!(type_filter("Front"), "[[type:Front]]");
         assert_eq!(type_cloze_filter("Front"), "[[type:cloze:Front]]");
         let ctx = RenderContext {
@@ -245,7 +239,7 @@ field</a>
             card_ord: 0,
         };
         assert_eq!(
-            apply_filters("ignored", &["cloze", "type"], "Text", &ctx, &tr),
+            apply_filters("ignored", &["cloze", "type"], "Text", &ctx),
             ("[[type:cloze:Text]]".into(), vec![])
         );
     }
@@ -280,17 +274,9 @@ field</a>
 
     #[test]
     fn tts() {
-        let tr = I18n::template_only();
         assert_eq!(
-            tts_filter("tts en_US voices=Bob,Jane", "foo", &tr),
-            "[anki:tts][en_US voices=Bob,Jane]foo[/anki:tts]"
-        );
-        assert_eq!(
-            tts_filter("tts en_US", "foo [...]", &tr),
-            format!(
-                "[anki:tts][en_US]foo {}[/anki:tts]",
-                tr.card_templates_blank()
-            )
+            tts_filter("en_US voices=Bob,Jane", "foo"),
+            "[anki:tts lang=en_US voices=Bob,Jane]foo[/anki:tts]"
         );
     }
 }
@@ -175,32 +175,6 @@ pub fn strip_html_for_tts(html: &str) -> Cow<str> {
     out
 }
 
-pub fn strip_av_tags(text: &str) -> Cow<str> {
-    AV_TAGS.replace_all(text, "")
-}
-
-/// Extract audio tags from string, replacing them with [anki:play] refs
-pub fn extract_av_tags(text: &str, question_side: bool) -> (Cow<str>, Vec<AvTag>) {
-    let mut tags = vec![];
-    let context = if question_side { 'q' } else { 'a' };
-    let replaced_text = AV_TAGS.replace_all(text, |caps: &Captures| {
-        // extract
-        let tag = if let Some(av_file) = caps.get(1) {
-            AvTag::SoundOrVideo(decode_entities(av_file.as_str()).into())
-        } else {
-            let args = caps.get(2).unwrap();
-            let field_text = caps.get(3).unwrap();
-            tts_tag_from_string(field_text.as_str(), args.as_str())
-        };
-        tags.push(tag);
-
-        // and replace with reference
-        format!("[anki:play:{}:{}]", context, tags.len() - 1)
-    });
-
-    (replaced_text, tags)
-}
-
 #[derive(Debug)]
 pub(crate) struct MediaRef<'a> {
     pub full_ref: &'a str,
@@ -242,40 +216,6 @@ pub(crate) fn extract_media_refs(text: &str) -> Vec<MediaRef> {
     out
 }
 
-fn tts_tag_from_string<'a>(field_text: &'a str, args: &'a str) -> AvTag {
-    let mut other_args = vec![];
-    let mut split_args = args.split_ascii_whitespace();
-    let lang = split_args.next().unwrap_or("");
-    let mut voices = None;
-    let mut speed = 1.0;
-
-    for remaining_arg in split_args {
-        if remaining_arg.starts_with("voices=") {
-            voices = remaining_arg
-                .split('=')
-                .nth(1)
-                .map(|voices| voices.split(',').map(ToOwned::to_owned).collect());
-        } else if remaining_arg.starts_with("speed=") {
-            speed = remaining_arg
-                .split('=')
-                .nth(1)
-                .unwrap()
-                .parse()
-                .unwrap_or(1.0);
-        } else {
-            other_args.push(remaining_arg.to_owned());
-        }
-    }
-
-    AvTag::TextToSpeech {
-        field_text: strip_html_for_tts(field_text).into(),
-        lang: lang.into(),
-        voices: voices.unwrap_or_else(Vec::new),
-        speed,
-        other_args,
-    }
-}
-
 pub fn strip_html_preserving_media_filenames(html: &str) -> Cow<str> {
     let without_fnames = HTML_MEDIA_TAGS.replace_all(html, r" ${1}${2}${3} ");
     let without_html = strip_html(&without_fnames);
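The argument parsing that tts_tag_from_string did above (first token is the language, then voices= and speed=, with anything else passed through as other_args) presumably moves into the new card_rendering module along with the rest of the AV-tag handling. Restated as a standalone sketch, assuming that behaviour is preserved:

```rust
// Standalone restatement of the removed TTS-argument parsing; whether the new
// card_rendering module keeps exactly this behaviour is an assumption.
fn parse_tts_args(args: &str) -> (String, Vec<String>, f32, Vec<String>) {
    let mut split = args.split_ascii_whitespace();
    let lang = split.next().unwrap_or("").to_string();
    let mut voices = Vec::new();
    let mut speed = 1.0;
    let mut other_args = Vec::new();
    for arg in split {
        if let Some(v) = arg.strip_prefix("voices=") {
            voices = v.split(',').map(ToOwned::to_owned).collect();
        } else if let Some(s) = arg.strip_prefix("speed=") {
            speed = s.parse().unwrap_or(1.0);
        } else {
            other_args.push(arg.to_owned());
        }
    }
    (lang, voices, speed, other_args)
}

fn main() {
    // mirrors the argument string used by the test removed below
    let (lang, voices, speed, other) = parse_tts_args("en_US voices=Bob,Jane speed=1.2");
    assert_eq!(lang, "en_US");
    assert_eq!(voices, vec!["Bob", "Jane"]);
    assert_eq!(speed, 1.2);
    assert!(other.is_empty());
}
```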
@@ -497,32 +437,6 @@ mod test {
         assert_eq!(strip_html_preserving_media_filenames("<html>"), "");
     }
 
-    #[test]
-    fn audio() {
-        let s = concat!(
-            "abc[sound:fo&obar.mp3]def[anki:tts][en_US voices=Bob,Jane speed=1.2]",
-            "foo b<i><b>a</b>r</i><br>1>2[/anki:tts]gh",
-        );
-        assert_eq!(strip_av_tags(s), "abcdefgh");
-
-        let (text, tags) = extract_av_tags(s, true);
-        assert_eq!(text, "abc[anki:play:q:0]def[anki:play:q:1]gh");
-
-        assert_eq!(
-            tags,
-            vec![
-                AvTag::SoundOrVideo("fo&obar.mp3".into()),
-                AvTag::TextToSpeech {
-                    field_text: "foo bar 1>2".into(),
-                    lang: "en_US".into(),
-                    voices: vec!["Bob".into(), "Jane".into()],
-                    other_args: vec![],
-                    speed: 1.2
-                },
-            ]
-        );
-    }
-
     #[test]
     fn combining() {
         assert!(matches!(without_combining("test"), Cow::Borrowed(_)));