Do DR calculation in parallel

Approx 5x faster on my machine
Damien Elmes 2025-07-28 18:40:47 +10:00
parent 06e17dff11
commit d90e2c717c
5 changed files with 10 additions and 0 deletions

Cargo.lock (generated)

@@ -131,6 +131,7 @@ dependencies = [
 "prost-reflect",
 "pulldown-cmark 0.13.0",
 "rand 0.9.1",
+"rayon",
 "regex",
 "reqwest 0.12.20",
 "rusqlite",

@@ -110,6 +110,7 @@ prost-types = "0.13"
 pulldown-cmark = "0.13.0"
 pyo3 = { version = "0.25.1", features = ["extension-module", "abi3", "abi3-py39"] }
 rand = "0.9.1"
+rayon = "1.10.0"
 regex = "1.11.1"
 reqwest = { version = "0.12.20", default-features = false, features = ["json", "socks", "stream", "multipart"] }
 rusqlite = { version = "0.36.0", features = ["trace", "functions", "collation", "bundled"] }

@@ -81,6 +81,7 @@ pin-project.workspace = true
 prost.workspace = true
 pulldown-cmark.workspace = true
 rand.workspace = true
+rayon.workspace = true
 regex.workspace = true
 reqwest.workspace = true
 rusqlite.workspace = true

@@ -16,6 +16,8 @@ use fsrs::FSRS;
 use itertools::Itertools;
 use rand::rngs::StdRng;
 use rand::Rng;
+use rayon::iter::IntoParallelIterator;
+use rayon::iter::ParallelIterator;

 use crate::card::CardQueue;
 use crate::card::CardType;
@@ -276,6 +278,7 @@ impl Collection {
     ) -> Result<SimulateFsrsWorkloadResponse> {
         let (config, cards) = self.simulate_request_to_config(&req)?;
         let dr_workload = (70u32..=99u32)
+            .into_par_iter()
             .map(|dr| {
                 let result = simulate(
                     &config,

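For reference, a minimal self-contained sketch of the rayon pattern used in the hunk above: the desired-retention (DR) range is fanned out across rayon's thread pool with into_par_iter and the per-value results are collected. The costly_simulation function is a placeholder standing in for the real simulate() call, not part of this change.

use rayon::iter::IntoParallelIterator;
use rayon::iter::ParallelIterator;

// Placeholder for the per-DR work; the real code runs an FSRS review
// simulation for each desired-retention value.
fn costly_simulation(dr: u32) -> f64 {
    (0..1_000_000).map(|i| (i as f64 * dr as f64).sqrt()).sum()
}

fn main() {
    // Same shape as the dr_workload expression: DR values 70..=99 are
    // simulated in parallel instead of sequentially.
    let dr_workload: Vec<(u32, f64)> = (70u32..=99u32)
        .into_par_iter()
        .map(|dr| (dr, costly_simulation(dr)))
        .collect();
    for (dr, cost) in dr_workload {
        println!("DR {dr}: {cost:.1}");
    }
}

Each DR value is simulated independently, so the work parallelizes cleanly; the speedup is bounded by core count, which is consistent with the roughly 5x reported in the commit message.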
@@ -93,6 +93,10 @@ impl TimestampMillis {
     pub fn adding_secs(self, secs: i64) -> Self {
         Self(self.0 + secs * 1000)
    }
+
+    pub fn elapsed_millis(self) -> u64 {
+        (Self::now().0 - self.0).max(0) as u64
+    }
 }

 fn elapsed() -> time::Duration {
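As a usage sketch (assumed, not part of the diff): the new elapsed_millis helper clamps at zero so a system clock that steps backwards cannot underflow before the cast to u64. A stand-alone stand-in mirroring the struct above, used to time a block of work:

use std::time::SystemTime;
use std::time::UNIX_EPOCH;

#[derive(Clone, Copy)]
struct TimestampMillis(i64);

impl TimestampMillis {
    fn now() -> Self {
        // Millisecond wall-clock timestamp, mirroring the real type.
        let ms = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as i64;
        Self(ms)
    }

    fn elapsed_millis(self) -> u64 {
        // max(0) guards the cast: a backwards clock step yields 0 rather
        // than a wrapped-around u64.
        (Self::now().0 - self.0).max(0) as u64
    }
}

fn main() {
    let start = TimestampMillis::now();
    std::thread::sleep(std::time::Duration::from_millis(50));
    println!("took {} ms", start.elapsed_millis());
}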