diff --git a/eorg/generate.py b/eorg/generate.py
index 203a513..1a641df 100644
--- a/eorg/generate.py
+++ b/eorg/generate.py
@@ -21,11 +21,24 @@ def img(doc, item, cls=''):
     return f'{text}'
 
 
-def parse_text_html(doc, token, cls=''):
-    if isinstance(token.value, list):
-        for item in token.value:
-            return handle_token(doc, item)
-    return f'<p{cls}>{token.value}</p>'
+def parse_text_html(doc, token, cls='', root=True):
+    if not isinstance(token.value, list):
+        return f'{token.value}'
+
+    print('test')
+
+    response = StringIO()
+
+    print(token.value)
+    response.write(f'<p{cls}>')
+
+    for item in token.value:
+        print(item)
+        response.write(handle_token(doc, item, False))
+    response.write(f'</p>')
+
+    response.seek(0)
+    return response.read()
 
 builddoc ={
     "HEADER1": ("h2", None),
@@ -35,13 +48,14 @@ builddoc ={
     "IMG": (img, 'materialboxed center-align responsive-img'),
     "B": ("b", None),
     "U": ("u", None),
-    "i": ("i", None),
+    "I": ("i", None),
+    "V": ("code", None),
     "TEXT": (parse_text_html, "flow-text"),
     "SRC_BEGIN": (src, None),
     "EXAMPLE": ('blockquote', None),
 }
 
-def handle_token(doc, item):
+def handle_token(doc, item, root=False):
     response = StringIO()
     match = builddoc.get(item.token)
     if not match:
@@ -52,7 +66,7 @@ def handle_token(doc, item):
     else:
         cls = ''
     if callable(tag):
-        return tag(doc, item, cls)
+        return tag(doc, item, cls, root=root)
     else:
         return '<%s%s>%s</%s>\n' % (tag, cls, item.value, tag)
 
@@ -60,6 +74,6 @@ def handle_token(doc, item):
 def html(doc):
     response = StringIO()
     for item in doc:
-        response.write(handle_token(doc, item))
+        response.write(handle_token(doc, item, True))
     response.seek(0)
     return response
diff --git a/eorg/parser.py b/eorg/parser.py
index f7a08e5..3574e19 100644
--- a/eorg/parser.py
+++ b/eorg/parser.py
@@ -152,7 +152,7 @@ def parse_text(txt):
             tokens.append(Token('IMG', [path, alt]))
         return ''
 
-    def emphasis(char, step, end='*', tag='b'):
+    def emphasis(char, step, end='*', tag='B'):
         if not char or char!=end:
             return char
         char = next(step, None)
@@ -160,18 +160,17 @@ def parse_text(txt):
         while char and char not in [end] + ESCAPE:
             r += char
             char = next(step, None)
-        tokens.append(Token('b', r))
+        tokens.append(Token(tag, r))
         return ''
 
-
     step = iter(txt)
     while char is not None:
         char = next(step, None)
-        char = emphasis(char, step, '*', 'b')
-        char = emphasis(char, step, '/', 'i')
-        char = emphasis(char, step, '_', 'u')
-        char = emphasis(char, step, '=', 'v')
-        char = emphasis(char, step, '~', 'pre')
+        char = emphasis(char, step, '*', 'B')
+        char = emphasis(char, step, '/', 'I')
+        char = emphasis(char, step, '_', 'U')
+        char = emphasis(char, step, '=', 'V')
+        char = emphasis(char, step, '~', 'PRE')
         char = img(char, step)
         if not char:
             continue
diff --git a/tests/fixtures/test.org b/tests/fixtures/test.org
index 748f23d..a10678a 100755
--- a/tests/fixtures/test.org
+++ b/tests/fixtures/test.org
@@ -8,8 +8,8 @@
 * Header 1
 ** Sub Header 1
-body text over
-multiple lines
+body =text= over
+multiple *lines*
 
 ** Sub Header 2
 
 * Header 2
diff --git a/tests/test_documents.py b/tests/test_documents.py
index 1a458af..176061d 100755
--- a/tests/test_documents.py
+++ b/tests/test_documents.py
@@ -1,6 +1,7 @@
 import os
 import pytest
 from eorg.parser import parse
+from eorg.generate import html
 
 
 def test_basic():
@@ -15,3 +16,11 @@ def test_body():
     with open(os.path.abspath("./tests/fixtures/test.org"), "r") as fp:
         doc = parse(fp)
         assert len([i for i in doc.body()]) > 0
+
+
+
+
+def test_html_output():
+    with open(os.path.abspath("./tests/fixtures/test.org"), "r") as fp:
+        doc = parse(fp)
+        assert html(doc).read() == ''
diff --git a/tests/test_html.py b/tests/test_html.py
index 56f73d8..43404aa 100644
--- a/tests/test_html.py
+++ b/tests/test_html.py
@@ -7,14 +7,21 @@ from eorg.parser import parse_text
 
 def test_emphasis():
     text = "parse emphasis *bold text* _underlined text_ /italic text/ normal text"
-    expected = [Token(token='TEXT', value='parse emphasis ' ), Token(token='b', value='bold text'), Token(token='TEXT', value=' ' ), Token(token='b', value='underlined text'), Token(token='TEXT', value=' ' ), Token(token='b', value='italic text'), Token('TEXT', ' normal text')]
+    expected = [Token(token='TEXT', value='parse emphasis ' ), Token(token='B', value='bold text'), Token(token='TEXT', value=' ' ), Token(token='U', value='underlined text'), Token(token='TEXT', value=' ' ), Token(token='I', value='italic text'), Token('TEXT', ' normal text')]
     result = parse_text(text)
+    assert result[0].token == 'TEXT'
     assert expected[0].value == result[0].value
+    assert result[1].token == 'B'
     assert expected[1].value == result[1].value
+    assert result[2].token == 'TEXT'
     assert expected[2].value == result[2].value
+    assert result[3].token == 'U'
     assert expected[3].value == result[3].value
+    assert result[4].token == 'TEXT'
     assert expected[4].value == result[4].value
+    assert result[5].token == 'I'
     assert expected[5].value == result[5].value
+    assert result[6].token == 'TEXT'
     assert expected[6].value == result[6].value
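
Usage note: the sketch below exercises the generator after this change, assuming only what the diff itself shows (eorg.parser.parse, eorg.generate.html, and the tests/fixtures/test.org fixture); it mirrors the new test_html_output test rather than documenting a settled API.

    # Sketch: parse the sample org fixture and render it through the new
    # root-aware handle_token()/parse_text_html() path.  html() returns a
    # StringIO, so the rendered markup comes from .read().
    import os

    from eorg.parser import parse
    from eorg.generate import html

    with open(os.path.abspath("./tests/fixtures/test.org"), "r") as fp:
        doc = parse(fp)

    # TEXT tokens are wrapped in <p class="flow-text">; nested B/I/U/V tokens
    # emitted by parse_text() map to <b>, <i>, <u> and <code> via builddoc.
    print(html(doc).read())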