From ce83001be6db383491c6ab197031547bcc8c0d2f Mon Sep 17 00:00:00 2001
From: Damien Elmes
Date: Wed, 20 Jun 2012 09:37:56 +0900
Subject: [PATCH] treat 'not' as a normal search string

---
 anki/find.py       | 6 +++---
 tests/test_find.py | 7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/anki/find.py b/anki/find.py
index c4c01dd57..e3756d69b 100644
--- a/anki/find.py
+++ b/anki/find.py
@@ -93,8 +93,8 @@ select distinct(n.id) from cards c, notes n where c.nid=n.id and """+preds
             elif c == "-":
                 if token:
                     token += c
-                elif not tokens or tokens[-1] != "not":
-                    tokens.append("not")
+                elif not tokens or tokens[-1] != "-":
+                    tokens.append("-")
             # normal character
             else:
                 token += c
@@ -140,7 +140,7 @@ select distinct(n.id) from cards c, notes n where c.nid=n.id and """+preds
             if s['bad']:
                 return None, None
             # special tokens
-            if token == "not":
+            if token == "-":
                 s['isnot'] = True
             elif token.lower() == "or":
                 s['isor'] = True
diff --git a/tests/test_find.py b/tests/test_find.py
index b82737743..5c67ef152 100644
--- a/tests/test_find.py
+++ b/tests/test_find.py
@@ -7,9 +7,10 @@ def test_parse():
     f = Finder(None)
     assert f._tokenize("hello world") == ["hello", "world"]
     assert f._tokenize("hello  world") == ["hello", "world"]
-    assert f._tokenize("one -two") == ["one", "not", "two"]
-    assert f._tokenize("one --two") == ["one", "not", "two"]
-    assert f._tokenize("one or -two") == ["one", "or", "not", "two"]
+    assert f._tokenize("one -two") == ["one", "-", "two"]
+    assert f._tokenize("one --two") == ["one", "-", "two"]
+    assert f._tokenize("one - two") == ["one", "-", "two"]
+    assert f._tokenize("one or -two") == ["one", "or", "-", "two"]
     assert f._tokenize("'hello \"world\"'") == ["hello \"world\""]
     assert f._tokenize('"hello world"') == ["hello world"]
    assert f._tokenize("one (two or ( three or four))") == [
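
A minimal standalone sketch of the negation handling after this change: "-" now
becomes its own token instead of being rewritten to "not", so the literal word
"not" tokenizes as an ordinary search term, per the patch subject. The
tokenize() helper below is hypothetical; the real Finder._tokenize in
anki/find.py also handles quoting, grouping, and other separators.

    def tokenize(query):
        """Split a search string, treating a bare "-" as the negation token."""
        tokens = []
        token = ""
        for c in query:
            if c == " ":
                # separator: flush the pending token
                if token:
                    tokens.append(token)
                    token = ""
            elif c == "-":
                if token:
                    # "-" inside a word stays part of the word, e.g. "a-b"
                    token += c
                elif not tokens or tokens[-1] != "-":
                    # a leading "-" becomes its own token; runs collapse to one
                    tokens.append("-")
            else:
                token += c
        if token:
            tokens.append(token)
        return tokens

    # Mirrors the updated assertions in tests/test_find.py:
    assert tokenize("one -two") == ["one", "-", "two"]
    assert tokenize("one --two") == ["one", "-", "two"]
    assert tokenize("one - two") == ["one", "-", "two"]
    assert tokenize("a-b") == ["a-b"]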