Refactor to recurse for emphasis; needs styling changes.
This commit is contained in:
parent
ed02e1bea3
commit
cf15023da5
|
@ -21,11 +21,24 @@ def img(doc, item, cls=''):
|
||||||
return f'<img{cls} style="margin:auto;" src="{item.value[0]}" alt="{item.value[1]}" />{text}'
|
return f'<img{cls} style="margin:auto;" src="{item.value[0]}" alt="{item.value[1]}" />{text}'
|
||||||
|
|
||||||
|
|
||||||
def parse_text_html(doc, token, cls=''):
|
def parse_text_html(doc, token, cls='', root=True):
|
||||||
if isinstance(token.value, list):
|
if not isinstance(token.value, list):
|
||||||
for item in token.value:
|
return f'<p{cls}>{token.value}</p>'
|
||||||
return handle_token(doc, item)
|
|
||||||
return f'<p{cls}>{token.value}</p>'
|
print('test')
|
||||||
|
|
||||||
|
response = StringIO()
|
||||||
|
|
||||||
|
print(token.value)
|
||||||
|
response.write(f'<p{cls}>')
|
||||||
|
|
||||||
|
for item in token.value:
|
||||||
|
print(item)
|
||||||
|
response.write(handle_token(doc, item, False))
|
||||||
|
response.write(f'</p>')
|
||||||
|
|
||||||
|
response.seek(0)
|
||||||
|
return response.read()
|
||||||
|
|
||||||
builddoc ={
|
builddoc ={
|
||||||
"HEADER1": ("h2", None),
|
"HEADER1": ("h2", None),
|
||||||
|
@ -35,13 +48,14 @@ builddoc ={
|
||||||
"IMG": (img, 'materialboxed center-align responsive-img'),
|
"IMG": (img, 'materialboxed center-align responsive-img'),
|
||||||
"B": ("b", None),
|
"B": ("b", None),
|
||||||
"U": ("u", None),
|
"U": ("u", None),
|
||||||
"i": ("i", None),
|
"I": ("i", None),
|
||||||
|
"V": ("code", None),
|
||||||
"TEXT": (parse_text_html, "flow-text"),
|
"TEXT": (parse_text_html, "flow-text"),
|
||||||
"SRC_BEGIN": (src, None),
|
"SRC_BEGIN": (src, None),
|
||||||
"EXAMPLE": ('blockquote', None),
|
"EXAMPLE": ('blockquote', None),
|
||||||
}
|
}
|
||||||
|
|
||||||
def handle_token(doc, item):
|
def handle_token(doc, item, root=False):
|
||||||
response = StringIO()
|
response = StringIO()
|
||||||
match = builddoc.get(item.token)
|
match = builddoc.get(item.token)
|
||||||
if not match:
|
if not match:
|
||||||
|
@ -52,7 +66,7 @@ def handle_token(doc, item):
|
||||||
else:
|
else:
|
||||||
cls = ''
|
cls = ''
|
||||||
if callable(tag):
|
if callable(tag):
|
||||||
return tag(doc, item, cls)
|
return tag(doc, item, cls, root=root)
|
||||||
else:
|
else:
|
||||||
return '<%s%s>%s</%s>\n' % (tag, cls, item.value, tag)
|
return '<%s%s>%s</%s>\n' % (tag, cls, item.value, tag)
|
||||||
|
|
||||||
|
@ -60,6 +74,6 @@ def handle_token(doc, item):
|
||||||
def html(doc):
|
def html(doc):
|
||||||
response = StringIO()
|
response = StringIO()
|
||||||
for item in doc:
|
for item in doc:
|
||||||
response.write(handle_token(doc, item))
|
response.write(handle_token(doc, item, True))
|
||||||
response.seek(0)
|
response.seek(0)
|
||||||
return response
|
return response
|
||||||
|
|
|
@ -152,7 +152,7 @@ def parse_text(txt):
|
||||||
tokens.append(Token('IMG', [path, alt]))
|
tokens.append(Token('IMG', [path, alt]))
|
||||||
return ''
|
return ''
|
||||||
|
|
||||||
def emphasis(char, step, end='*', tag='b'):
|
def emphasis(char, step, end='*', tag='B'):
|
||||||
if not char or char!=end:
|
if not char or char!=end:
|
||||||
return char
|
return char
|
||||||
char = next(step, None)
|
char = next(step, None)
|
||||||
|
@ -160,18 +160,17 @@ def parse_text(txt):
|
||||||
while char and char not in [end] + ESCAPE:
|
while char and char not in [end] + ESCAPE:
|
||||||
r += char
|
r += char
|
||||||
char = next(step, None)
|
char = next(step, None)
|
||||||
tokens.append(Token('b', r))
|
tokens.append(Token(tag, r))
|
||||||
return ''
|
return ''
|
||||||
|
|
||||||
|
|
||||||
step = iter(txt)
|
step = iter(txt)
|
||||||
while char is not None:
|
while char is not None:
|
||||||
char = next(step, None)
|
char = next(step, None)
|
||||||
char = emphasis(char, step, '*', 'b')
|
char = emphasis(char, step, '*', 'B')
|
||||||
char = emphasis(char, step, '/', 'i')
|
char = emphasis(char, step, '/', 'I')
|
||||||
char = emphasis(char, step, '_', 'u')
|
char = emphasis(char, step, '_', 'U')
|
||||||
char = emphasis(char, step, '=', 'v')
|
char = emphasis(char, step, '=', 'V')
|
||||||
char = emphasis(char, step, '~', 'pre')
|
char = emphasis(char, step, '~', 'PRE')
|
||||||
char = img(char, step)
|
char = img(char, step)
|
||||||
if not char:
|
if not char:
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -8,8 +8,8 @@
|
||||||
|
|
||||||
* Header 1
|
* Header 1
|
||||||
** Sub Header 1
|
** Sub Header 1
|
||||||
body text over
|
body =text= over
|
||||||
multiple lines
|
multiple *lines*
|
||||||
** Sub Header 2
|
** Sub Header 2
|
||||||
* Header 2
|
* Header 2
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
import os
|
import os
|
||||||
import pytest
|
import pytest
|
||||||
from eorg.parser import parse
|
from eorg.parser import parse
|
||||||
|
from eorg.generate import html
|
||||||
|
|
||||||
|
|
||||||
def test_basic():
|
def test_basic():
|
||||||
|
@ -15,3 +16,11 @@ def test_body():
|
||||||
with open(os.path.abspath("./tests/fixtures/test.org"), "r") as fp:
|
with open(os.path.abspath("./tests/fixtures/test.org"), "r") as fp:
|
||||||
doc = parse(fp)
|
doc = parse(fp)
|
||||||
assert len([i for i in doc.body()]) > 0
|
assert len([i for i in doc.body()]) > 0
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def test_html_output():
|
||||||
|
with open(os.path.abspath("./tests/fixtures/test.org"), "r") as fp:
|
||||||
|
doc = parse(fp)
|
||||||
|
assert html(doc).read() == ''
|
||||||
|
|
|
@ -7,14 +7,21 @@ from eorg.parser import parse_text
|
||||||
|
|
||||||
def test_emphasis():
|
def test_emphasis():
|
||||||
text = "parse emphasis *bold text* _underlined text_ /italic text/ normal text"
|
text = "parse emphasis *bold text* _underlined text_ /italic text/ normal text"
|
||||||
expected = [Token(token='TEXT', value='parse emphasis ' ), Token(token='b', value='bold text'), Token(token='TEXT', value=' ' ), Token(token='b', value='underlined text'), Token(token='TEXT', value=' ' ), Token(token='b', value='italic text'), Token('TEXT', ' normal text')]
|
expected = [Token(token='TEXT', value='parse emphasis ' ), Token(token='B', value='bold text'), Token(token='TEXT', value=' ' ), Token(token='U', value='underlined text'), Token(token='TEXT', value=' ' ), Token(token='I', value='italic text'), Token('TEXT', ' normal text')]
|
||||||
result = parse_text(text)
|
result = parse_text(text)
|
||||||
|
assert result[0].token == 'TEXT'
|
||||||
assert expected[0].value == result[0].value
|
assert expected[0].value == result[0].value
|
||||||
|
assert result[1].token == 'B'
|
||||||
assert expected[1].value == result[1].value
|
assert expected[1].value == result[1].value
|
||||||
|
assert result[2].token == 'TEXT'
|
||||||
assert expected[2].value == result[2].value
|
assert expected[2].value == result[2].value
|
||||||
|
assert result[3].token == 'U'
|
||||||
assert expected[3].value == result[3].value
|
assert expected[3].value == result[3].value
|
||||||
|
assert result[4].token == 'TEXT'
|
||||||
assert expected[4].value == result[4].value
|
assert expected[4].value == result[4].value
|
||||||
|
assert result[5].token == 'I'
|
||||||
assert expected[5].value == result[5].value
|
assert expected[5].value == result[5].value
|
||||||
|
assert result[6].token == 'TEXT'
|
||||||
assert expected[6].value == result[6].value
|
assert expected[6].value == result[6].value
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue