Fix up block parsing and improve handling of emphasis
parent 84ae1933a1
commit c616c3f712
@@ -5,77 +5,74 @@ from pygments.lexers import PythonLexer
 from pygments.lexers import get_lexer_by_name
 from pygments.formatters import HtmlFormatter
 
-def src(doc, code, cls='', root=True):
+def src(doc, code, cls="", root=True):
     try:
-        lexer = get_lexer_by_name(code.attrs.get('language', 'shell'))
+        lexer = get_lexer_by_name(code.attrs.get("language", "shell"))
     except pygments.util.ClassNotFound as e:
-        lexer = get_lexer_by_name(code.attrs.get('language', 'text'))
+        lexer = get_lexer_by_name(code.attrs.get("language", "text"))
 
     return highlight(code.value, lexer, HtmlFormatter(linenos=True))
 
-def img(doc, item, cls='', root=True):
-    caption = doc.previous('CAPTION')
-    text = ''
+def img(doc, item, cls="", root=True):
+    caption = doc.previous("CAPTION")
+    text = ""
     if caption:
         text = f'<p class="center-align">{caption.value}</p>'
     return f'<img{cls} style="margin:auto;" src="{item.value[0]}" alt="{item.value[1]}" />{text}'
 
 
-def parse_text_html(doc, token, cls='', root=True):
-    print('test')
-    print(token)
-    # if its the start of a text body wrap html tags
-    # else more complicated so return the tags
-    if isinstance(token.value, str):
-        if root is True:
-            return f'<p{cls}>{token.value}</p>'
-    return f'{token.value}'
-
-
 def parse_list_html(doc, token, cls="", root=True):
     response = StringIO()
-    print(token.value)
-    response.write(f'<p{cls}>')
+    response.write(f"<p{cls}>")
     for item in token.value:
-        print(item)
         response.write(handle_token(doc, item, False))
-    response.write(f'</p>')
-    response.seek(0)
-    print(response.read())
+    response.write(f"</p>")
+    response.seek(0)
     return response.read()
 
 
-builddoc ={
+def parse_text_html(doc, token, cls="", root=True):
+    # if its the start of a text body wrap html tags
+    # else more complicated so return the tags
+    #if root is True:
+    #    return f"<p{cls}>{token.value}</p>"
+    return f"{token.value}"
+
+
+builddoc = {
     "HEADER1": ("h2", None),
     "HEADER2": ("h3", None),
     "HEADER3": ("h4", None),
     # "BREAK": "br",
-    "IMG": (img, 'materialboxed center-align responsive-img'),
+    "IMG": (img, "materialboxed center-align responsive-img"),
     "B": ("b", None),
     "U": ("u", None),
     "I": ("i", None),
     "V": ("code", None),
     "LIST": (parse_list_html, "flow-text"),
     "TEXT": (parse_text_html, "flow-text"),
     "SRC_BEGIN": (src, None),
-    "EXAMPLE": ('blockquote', None),
+    "EXAMPLE": ("blockquote", None),
 }
 
 
 def handle_token(doc, item, root=False):
     response = StringIO()
     match = builddoc.get(item.token)
     if not match:
-        return ''
+        return ""
     tag, cls = match
     if cls:
         cls = f' class="{cls}"'
     else:
-        cls = ''
+        cls = ""
     if callable(tag):
         return tag(doc, item, cls, root=root)
     else:
-        return '<%s%s>%s</%s>\n' % (tag, cls, item.value, tag)
+        return "<%s%s>%s</%s>\n" % (tag, cls, item.value, tag)
 
 
 def html(doc):
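Taken on its own, the reworked src helper is a straight pygments pipeline with a fallback lexer. Below is a minimal standalone sketch; highlight_block is a hypothetical stand-in for src without the doc and token plumbing. One caveat: the committed except branch looks "language" up from code.attrs again, so a present-but-unknown language name raises ClassNotFound a second time; the sketch hard-codes the "text" fallback, which appears to be the intent.

import pygments.util
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter


def highlight_block(source, language="shell"):
    # highlight_block is an illustrative stand-in, not part of the package.
    try:
        lexer = get_lexer_by_name(language)
    except pygments.util.ClassNotFound:
        # Unknown language name: fall back to the plain-text lexer.
        lexer = get_lexer_by_name("text")
    return highlight(source, lexer, HtmlFormatter(linenos=True))


print(highlight_block("(some lispy code)", language="no-such-lexer"))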
@@ -95,9 +95,8 @@ def parse_attrs(text):
 def parsebody(text, rx):
     match = re.search(rx, text)
     if match:
-        return False, ""
-    else:
-        return rx, text + "\n"
+        return False, None
+    return rx, text + "\n"
 
 def parseline(text):
     for key, (rx, block, s, e, count) in TOKENS.items():
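parsebody now reports the end of a block as (False, None) rather than (False, ""), making the "no token" case explicit instead of relying on an empty string happening to be falsy. A minimal sketch of the contract, assuming rx is an end-of-block regex drawn from TOKENS; the #+END_SRC pattern here is only illustrative.

import re


def parsebody(text, rx):
    match = re.search(rx, text)
    if match:
        return False, None   # end marker hit: close the block, emit nothing
    return rx, text + "\n"   # still inside: keep the pattern active, collect the line


print(parsebody("(some lispy code)", r"#\+END_SRC"))  # ('#\\+END_SRC', '(some lispy code)\n')
print(parsebody("#+END_SRC", r"#\+END_SRC"))          # (False, None)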
@@ -119,7 +118,7 @@ def parseline(text):
     text = text.strip()
     if text == "":
         return False, Token(token="BREAK", value=text)
-    return False, Token(token="TEXT", value=text + " ")
+    return False, Token(token="LIST", value=text + " ")
 
 
 def parse_text(txt):
@@ -191,17 +190,16 @@ def parse(stream):
         line = line.strip('\n')
         if block is not False:
             block, token = parsebody(line, block)
             if token:
-                #block = result[0]
                 if block:
                     doc.update(token)
-                    continue
+                continue
         block, token = parseline(line)
         if token:
-            if doc.token() == "TEXT" and token.token == "TEXT":
+            if doc.token() == "LIST" and token.token == "LIST":
                 doc.update(token.value)
                 continue
             doc.append(token)
-    for item in doc.filter('TEXT'):
+    for item in doc.filter('LIST'):
         item.value = parse_text(item.value)
     return doc
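The TEXT-to-LIST rename in the parse loop matters because of the consolidation step above: consecutive LIST tokens are merged into a single value, so parse_text later receives the whole run of lines at once, presumably so emphasis markup spanning a line break can be handled in one pass. A toy sketch of that merge, where Token and Doc are hypothetical stand-ins for the package's real classes:

from collections import namedtuple

Token = namedtuple("Token", "token value")  # stand-in for eorg's Token


class Doc:
    """Toy document object: just enough state to show the merge."""

    def __init__(self):
        self.items = []

    def token(self):
        # Token type of the most recently appended item, or "" when empty.
        return self.items[-1].token if self.items else ""

    def append(self, tok):
        self.items.append(tok)

    def update(self, value):
        # Extend the value of the last token in place.
        last = self.items[-1]
        self.items[-1] = Token(last.token, last.value + value)


doc = Doc()
for tok in [Token("LIST", "body text "), Token("LIST", "over multiple lines ")]:
    if doc.token() == "LIST" and tok.token == "LIST":
        doc.update(tok.value)
        continue
    doc.append(tok)

print(doc.items)
# [Token(token='LIST', value='body text over multiple lines ')]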
@@ -1,2 +1,2 @@
-__version__ = 0.4
+__version__ = 0.5
@@ -23,4 +23,16 @@ def test_body():
 def test_html_output():
     with open(os.path.abspath("./tests/fixtures/test.org"), "r") as fp:
         doc = parse(fp)
-    assert html(doc).read() == ''
+    htmlbody = html(doc).read()
+    print(htmlbody)
+    assert htmlbody == """<p class="flow-text">#+DATE: jkkj </p><h2> Header 1</h2>
+<h3> Sub Header 1</h3>
+<p class="flow-text">body <code>text</code>
+over multiple <b>lines</b>
+</p><h3> Sub Header 2</h3>
+<h2> Header 2</h2>
+<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span></span><span class="p">(</span><span class="nv">some</span> <span class="nv">lispy</span> <span class="nv">code</span><span class="p">)</span>
+</pre></div>
+</td></tr></table><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span></span><span class="p">(</span><span class="nv">test</span> <span class="nv">code</span><span class="p">)</span>
+</pre></div>
+</td></tr></table>"""
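The <table class="highlighttable"> markup the new assertion expects is standard pygments output once HtmlFormatter is constructed with linenos=True: the line numbers go in one table cell and the highlighted code in another. A quick way to reproduce that shape independently of the package:

from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter

# linenos=True selects the table-based layout that the test string encodes.
out = highlight("(some lispy code)", get_lexer_by_name("shell"),
                HtmlFormatter(linenos=True))
print(out[:40])  # <table class="highlighttable"><tr>...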