Mirror of https://github.com/ankitects/anki.git (synced 2025-09-20 06:52:21 -04:00)

* Add crate csv
* Add start of csv importing on backend
* Add Mnemosyne serializer
* Add csv and json importing on backend
* Add plaintext importing on frontend
* Add csv metadata extraction on backend
* Add csv importing with GUI
* Fix missing dfa file in build
Added compile_data_attr, then re-ran cargo/update.py.
* Don't use doubly buffered reader in csv
* Escape HTML entities if CSV is not HTML
Also use name 'is_html' consistently.
* Use decimal number as foreign ease (like '2.5')
* ForeignCard.ivl → ForeignCard.interval
* Only allow fixed set of CSV delimiters
* Map timestamp of ForeignCard to native due time
* Don't trim CSV records
* Document use of empty strings for defaults
* Avoid creating CardGenContexts for every note
This requires CardGenContext to be generic, so it works with both an
owned and a borrowed notetype, as sketched below.
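A minimal sketch of that genericity, assuming a simplified Notetype
stand-in (the real CardGenContext carries more state):

use std::{ops::Deref, sync::Arc};

struct Notetype; // hypothetical stand-in for the real type

/// Generic over anything that derefs to a Notetype, so a single context
/// can be built from either a borrowed or an owned notetype and then
/// reused for every note of an import.
struct CardGenContext<N: Deref<Target = Notetype>> {
    notetype: N,
}

impl<N: Deref<Target = Notetype>> CardGenContext<N> {
    fn new(notetype: N) -> Self {
        Self { notetype }
    }
}

fn with_borrowed(nt: &Notetype) -> CardGenContext<&Notetype> {
    CardGenContext::new(nt)
}

fn with_owned(nt: Arc<Notetype>) -> CardGenContext<Arc<Notetype>> {
    CardGenContext::new(nt)
}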
* Show all accepted file types in import file picker
* Add import_json_file()
* factor → ease_factor
* delimter_from_value → delimiter_from_value
* Map columns to fields, not the other way around
* Fall back to current config for csv metadata
* Add start of new import csv screen
* Temporary fix for compilation issue on Linux/Mac
* Disable jest bazel action for import-csv
Jest fails with an error code if no tests are available, but this would
not be noticeable on Windows as Jest is not run there.
* Fix field mapping issue
* Revert "Temporary fix for compilation issue on Linux/Mac"
This reverts commit 21f8a26140.
* Add HtmlSwitch and move Switch to components
* Fix spacing and make selectors consistent
* Fix shortcut tooltip
* Place import button at the top with path
* Fix meta column indices
* Remove NotetypeForString
* Fix queue and type of foreign cards
* Support different dupe resolution strategies
* Allow dupe resolution selection when importing CSV
* Test import of unnormalized text
Close #1863.
* Fix logging of foreign notes
* Implement CSV exports
* Use db_scalar() in notes_table_len()
* Rework CSV metadata
- Notetypes and decks are either defined by a global id or by a column.
- If a notetype id is provided, its field map must also be specified.
- If a notetype column is provided, fields are now mapped by index
instead of name at import time. So the first non-meta column is used for
the first field of every note, regardless of notetype. This makes
importing easier and should improve compatibility with files without a
notetype column.
- Ensure first field can be mapped to a column.
- Meta columns must be defined as `#[meta name]:[column index]` instead
of in the `#columns` tag. (See the example after this list.)
- Column labels contain the raw names defined by the file and must be
prettified by the frontend.
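For illustration, a header following this scheme might look like the
following (the column assignments are hypothetical):

#notetype column:1
#deck column:2
#tags column:3

Here columns 1-3 are meta columns, so column 4 is the first non-meta
column and feeds the first field of every note, regardless of its
notetype.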
* Adjust frontend to new backend column mapping
* Add force flags for is_html and delimiter
* Detect if CSV is HTML by field content
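A heuristic along these lines could implement the check (a sketch; the
exact pattern Anki uses is an assumption here):

use regex::Regex;

/// Hypothetical check: treat the file as HTML if any sampled field
/// contains a tag-like token or a character entity.
fn fields_look_like_html<'a>(mut fields: impl Iterator<Item = &'a str>) -> bool {
    let re = Regex::new(r"<[a-zA-Z/!][^>]*>|&[a-zA-Z0-9#]+;").unwrap();
    fields.any(|f| re.is_match(f))
}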
* Update dupe resolution labels
* Simplify selectors
* Fix coalescence of oneofs in TS
* Disable meta columns from selection
Plus a lot of refactoring.
* Make import button stick to the bottom
* Write delimiter and html flag into csv
* Refetch field map after notetype change
* Fix log labels for csv import
* Log notes whose deck/notetype was missing
* Fix hiding of empty log queues
* Implement adding tags to all notes of a csv
* Fix dupe resolution not being set in log
* Implement adding tags to updated notes of a csv
* Check first note field is not empty
* Temporary fix for build on Linux/Mac
* Fix inverted html check (dae)
* Remove unused ftl string
* Delimiter → Separator
* Remove commented-out line
* Don't accept .json files
* Tweak tag ftl strings
* Remove redundant blur call
* Strip sound and add spaces in csv export
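The sound-stripping half might look roughly like this (a sketch using the
regex crate; the actual replacement rules, including where spaces are
added, may differ):

use regex::Regex;

/// Drop [sound:...] references from a field before it is written to the CSV.
fn strip_sound(field: &str) -> String {
    Regex::new(r"\[sound:[^\]]+\]")
        .unwrap()
        .replace_all(field, "")
        .into_owned()
}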
* Export HTML by default
* Fix unset deck in Mnemosyne import
Also accept both numbers and strings for notetypes and decks in JSON.
* Make DupeResolution::Update the default
* Fix missing dot in extension
* Make column indices 1-based
* Remove StickContainer from TagEditor
Fixes line breaking, border and z-index on ImportCsvPage.
* Assign different key combos to tag editors
* Log all updated duplicates
Add a log field for the true number of found notes.
* Show identical notes as skipped
* Split tag-editor into separate ts module (dae)
* Add progress for CSV export
* Add progress for text import
* Tidy-ups after tag-editor split (dae)
- import-csv no longer depends on editor
- remove some commented lines
188 lines | 4.9 KiB | Protocol Buffer
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

syntax = "proto3";

package anki.import_export;

import "anki/cards.proto";
import "anki/collection.proto";
import "anki/notes.proto";
import "anki/generic.proto";

service ImportExportService {
  rpc ImportCollectionPackage(ImportCollectionPackageRequest)
      returns (generic.Empty);
  rpc ExportCollectionPackage(ExportCollectionPackageRequest)
      returns (generic.Empty);
  rpc ImportAnkiPackage(ImportAnkiPackageRequest) returns (ImportResponse);
  rpc ExportAnkiPackage(ExportAnkiPackageRequest) returns (generic.UInt32);
  rpc GetCsvMetadata(CsvMetadataRequest) returns (CsvMetadata);
  rpc ImportCsv(ImportCsvRequest) returns (ImportResponse);
  rpc ExportNoteCsv(ExportNoteCsvRequest) returns (generic.UInt32);
  rpc ExportCardCsv(ExportCardCsvRequest) returns (generic.UInt32);
  rpc ImportJsonFile(generic.String) returns (ImportResponse);
  rpc ImportJsonString(generic.String) returns (ImportResponse);
}

message ImportCollectionPackageRequest {
  string col_path = 1;
  string backup_path = 2;
  string media_folder = 3;
  string media_db = 4;
}

message ExportCollectionPackageRequest {
  string out_path = 1;
  bool include_media = 2;
  bool legacy = 3;
}

message ImportAnkiPackageRequest {
  string package_path = 1;
}

message ImportResponse {
  message Note {
    notes.NoteId id = 1;
    repeated string fields = 2;
  }
  message Log {
    repeated Note new = 1;
    repeated Note updated = 2;
    repeated Note duplicate = 3;
    repeated Note conflicting = 4;
    repeated Note first_field_match = 5;
    repeated Note missing_notetype = 6;
    repeated Note missing_deck = 7;
    repeated Note empty_first_field = 8;
    ImportCsvRequest.DupeResolution dupe_resolution = 9;
    // Usually the sum of all queues, but may be lower if multiple duplicates
    // have been updated with the same note.
    uint32 found_notes = 10;
  }
  collection.OpChanges changes = 1;
  Log log = 2;
}

message ExportAnkiPackageRequest {
  string out_path = 1;
  bool with_scheduling = 2;
  bool with_media = 3;
  bool legacy = 4;
  ExportLimit limit = 5;
}

message PackageMetadata {
  enum Version {
    VERSION_UNKNOWN = 0;
    // When `meta` missing, and collection.anki2 file present.
    VERSION_LEGACY_1 = 1;
    // When `meta` missing, and collection.anki21 file present.
    VERSION_LEGACY_2 = 2;
    // Implies MediaEntry media map, and zstd compression.
    // collection.21b file
    VERSION_LATEST = 3;
  }

  Version version = 1;
}

message MediaEntries {
  message MediaEntry {
    string name = 1;
    uint32 size = 2;
    bytes sha1 = 3;

    /// Legacy media maps may include gaps in the media list, so the original
    /// file index is recorded when importing from a HashMap. This field is not
    /// set when exporting.
    optional uint32 legacy_zip_filename = 255;
  }

  repeated MediaEntry entries = 1;
}

message ImportCsvRequest {
  enum DupeResolution {
    UPDATE = 0;
    ADD = 1;
    IGNORE = 2;
    // UPDATE_IF_NEWER = 3;
  }
  string path = 1;
  CsvMetadata metadata = 2;
  DupeResolution dupe_resolution = 3;
}

message CsvMetadataRequest {
  string path = 1;
  optional CsvMetadata.Delimiter delimiter = 2;
  optional int64 notetype_id = 3;
}

// Column indices are 1-based to make working with them in TS easier, where
// unset numerical fields default to 0.
message CsvMetadata {
  // Order roughly in ascending expected frequency in note text, because the
  // delimiter detection algorithm is stupidly picking the first one it
  // encounters.
  enum Delimiter {
    TAB = 0;
    PIPE = 1;
    SEMICOLON = 2;
    COLON = 3;
    COMMA = 4;
    SPACE = 5;
  }
  message MappedNotetype {
    int64 id = 1;
    // Source column indices for note fields. One-based. 0 means n/a.
    repeated uint32 field_columns = 2;
  }
  Delimiter delimiter = 1;
  bool is_html = 2;
  repeated string global_tags = 3;
  repeated string updated_tags = 4;
  // Column names as defined by the file or empty strings otherwise. Also used
  // to determine the number of columns.
  repeated string column_labels = 5;
  oneof deck {
    int64 deck_id = 6;
    // One-based. 0 means n/a.
    uint32 deck_column = 7;
  }
  oneof notetype {
    // One notetype for all rows with given column mapping.
    MappedNotetype global_notetype = 8;
    // Row-specific notetypes with automatic mapping by index.
    // One-based. 0 means n/a.
    uint32 notetype_column = 9;
  }
  // One-based. 0 means n/a.
  uint32 tags_column = 10;
  bool force_delimiter = 11;
  bool force_is_html = 12;
}

message ExportCardCsvRequest {
  string out_path = 1;
  bool with_html = 2;
  ExportLimit limit = 3;
}

message ExportNoteCsvRequest {
  string out_path = 1;
  bool with_html = 2;
  bool with_tags = 3;
  ExportLimit limit = 4;
}

message ExportLimit {
  oneof limit {
    generic.Empty whole_collection = 1;
    int64 deck_id = 2;
    notes.NoteIds note_ids = 3;
    cards.CardIds card_ids = 4;
  }
}
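Given the 1-based convention above, backend code has to translate a
column number into a vector index, with 0 meaning unset. A minimal
sketch of that conversion (the helper name is illustrative):

/// Convert a 1-based column number from the protobuf into a 0-based index,
/// where 0 means "not applicable".
fn column_to_index(column: u32) -> Option<usize> {
    (column > 0).then(|| column as usize - 1)
}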