Work on improving results handling.

continuous-integration/drone/push: Build is failing

parent ae9a504014
commit bca6ee01e6

@@ -34,7 +34,7 @@ t_SRC_BEGIN = r"^\#\+BEGIN_SRC\s+"
 t_SRC_END = r"^\#\+END_SRC"
 t_TABLE_START = r"^\s*\|"
 t_TABLE_END = r"^(?!\s*\|).*$"
-t_RESULTS_START = r"^\#\+RESULTS:"
+t_RESULTS_START = r"^\#\+RESULTS\:"
 t_CAPTIONS = r"^\#\+CAPTION:"
 t_NAME = r"^\#\+NAME:"
 # t_IMG = r"^\[\[(\w|\.|-|_|/)+\]\]$"
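
For reference, the escaped colon in the new pattern is cosmetic: "\:" and ":" are the same atom to Python's re module, so the marker matches exactly as before. A minimal stdlib check:

import re

# Both spellings match a literal "#+RESULTS:" at the start of a line.
old, new = r"^\#\+RESULTS:", r"^\#\+RESULTS\:"
assert re.search(old, "#+RESULTS:") and re.search(new, "#+RESULTS:")
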
@@ -68,7 +68,7 @@ TOKENS = {
     tokens.BULLET: TokenStruct(
         start=t_BULLET_START, end=t_BULLET_END, start_pos=0
     ),
-    tokens.RESULTS: TokenStruct(start=t_SRC_BEGIN, end=t_SRC_END),
+    tokens.RESULTS: TokenStruct(start=t_RESULTS_START, end=t_RESULTS_END),
     tokens.HEADER: TokenStruct(start=t_HEADER, start_pos=1, count=True),
     tokens.META_OTHER: TokenStruct(
         start=t_META_OTHER, start_pos=2, end_pos=-1
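
RESULTS entries previously borrowed the source-block delimiters, so a results block only matched between #+BEGIN_SRC/#+END_SRC markers; it now keys off its own start pattern. Note t_RESULTS_END is referenced here but defined outside this hunk. A minimal sketch of how such a start pattern is applied, with a simplified stand-in for TokenStruct:

import re
from collections import namedtuple

# Simplified stand-in; the real TokenStruct and t_RESULTS_END live in
# eorg/const.py and are not fully visible in this diff.
TokenStruct = namedtuple("TokenStruct", ["start", "end"])
results = TokenStruct(start=r"^\#\+RESULTS\:", end=None)

assert re.search(results.start, "#+RESULTS:") is not None
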
@@ -69,10 +69,10 @@ def parse_text_html(doc, token, cls="", root=True):
 
 
 def results(doc, token, cls="", root=True):
-    if token.value.startswith('file:'):
+    if token.value.startswith('[[file:'):
         return "<img%s src=\"%s\"/>\n" % (
             cls,
-            escape(token.value).replace("\n", "<br />"),
+            escape(token.value.strip()[7:-2]),
         )
 
     return "<blockquote%s>%s</blockquote>\n" % (
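
The new slice peels the Org link wrapper off the value before it lands in the src attribute: "[[file:" is seven characters in and "]]" is two from the end, hence strip()[7:-2]. A quick check with the filename used in the test further down:

# "[[file:" is 7 chars, "]]" is 2, so [7:-2] leaves the bare path.
value = "[[file:test.png]]"
assert value.strip()[7:-2] == "test.png"
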
@@ -130,8 +130,8 @@ builddoc = {
     tokens.TEXT: (parse_text_html, "flow-text"),
     tokens.BULLET: (parse_bullets_html, "browser-default"),
     tokens.SOURCE: (src, None),
-    tokens.EXAMPLE: (results, None),
-    tokens.RESULTS: (blockquote, None),
+    tokens.EXAMPLE: (blockquote, None),
+    tokens.RESULTS: (results, None),
     tokens.TABLE: (table, "responsive-table striped"),
 }
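
Before this commit the two handlers were crossed: RESULTS tokens rendered as blockquotes while EXAMPLE tokens took the image path. For orientation, a hypothetical driver loop over this table (the real one lives in eorg/generate.py and is not shown in this diff):

def render(doc, builddoc, out):
    # each builddoc entry maps a token type to a (renderer, css_class) pair
    for token in doc:
        handler, cls = builddoc[token.token]
        attr = ' class="%s"' % cls if cls else ""
        out.write(handler(doc, token, cls=attr))
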
@@ -3,6 +3,18 @@ from eorg.tokens import Token
 from eorg.const import ESCAPE, image_extensions
 
 
+def emphasis(char, step, end, tag):
+    if not char or char != end:
+        return char, None
+
+    char = next(step, None)
+    r = ""
+    while char and char not in [end] + ESCAPE:
+        r += char
+        char = next(step, None)
+    return False, Token(tag, r)
+
+
 def parse_img_or_link(char, step):
     if char != "[":
         return char, None
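
emphasis() now returns a (char, token) pair instead of appending into a closed-over list, which is what lets parser.py import and share it (see the parse_text rewrite below): the character comes back untouched when the delimiter does not match, and False plus a finished token when it does. A rough usage check, assuming eorg is importable:

from eorg import tokens
from eorg.tokens import Token
from eorg.helper import emphasis

# Scanning after an opening "*" consumes text up to the closing delimiter.
step = iter("bold* tail")
char, token = emphasis("*", step, "*", tokens.BOLD)
assert char is False and token == Token(tokens.BOLD, "bold")
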
eorg/parser.py
@@ -2,13 +2,9 @@ import re
 from eorg import tokens
 from eorg.tokens import Token
 from eorg.const import (
-    TYPE_ATTRIBUTE,
-    TOKENS,
-    METADATA,
-    ESCAPE,
-    image_extensions,
+    TYPE_ATTRIBUTE, TOKENS, METADATA, ESCAPE, image_extensions
 )
-from eorg.helper import parse_img_or_link
+from eorg.helper import parse_img_or_link, emphasis
 
 
 class Document:
@@ -109,6 +105,7 @@ def parsebody(text, rx):
     match = re.search(rx, text)
     if match:
         return False, None
+
     return rx, text + "\n"
 
 
@@ -119,7 +116,7 @@ def parseline(text, stream):
         if not match:
             continue
 
-        value = text[match.end() :]
+        value = text[match.end():]
         if token.type == TYPE_ATTRIBUTE:
             b, t = parseline(next(stream), stream)
             t.attrs = {token.key: value}
@@ -157,63 +154,13 @@ def parseline(text, stream):
     return False, Token(token=tokens.LIST, value=text + " ")
 
 
-def parse_text(txt):
+def parse_results(txt):
     char = True
     tokenlist = []
 
-    def img(char, step):
-        if char != "[":
-            return char
-
-        char = next(step, None)
-
-        if char != "[":
-            return char
-
-        char = next(step, None)
-
-        path = ""
-        while char not in ["]"] + ESCAPE:
-            path += char
-            char = next(step, None)
-        char = next(step, None)
-
-        alt = ""
-        if char == "[":
-            char = next(step, None)
-            while char not in ["]"] + ESCAPE:
-                alt += char
-                char = next(step, None)
-            char = next(step, None)
-
-        if path.endswith(image_extensions):
-            tokenlist.append(Token(tokens.IMAGE, [path, alt]))
-            return ""
-
-        tokenlist.append(Token(tokens.LINK, [path, alt]))
-        return ""
-
-    def emphasis(char, step, end, tag):
-        if not char or char != end:
-            return char
-
-        char = next(step, None)
-        r = ""
-        while char and char not in [end] + ESCAPE:
-            r += char
-            char = next(step, None)
-        tokenlist.append(Token(tag, r))
-        return ""
-
     step = iter(txt)
     while char is not None:
         char = next(step, None)
-        char = emphasis(char, step, "*", tokens.BOLD)
-        char = emphasis(char, step, "/", tokens.ITALIC)
-        char = emphasis(char, step, "_", tokens.UNDERLINED)
-        char = emphasis(char, step, "=", tokens.VERBATIM)
-        char = emphasis(char, step, "~", "PRE")
-        # char = img(char, step)
         char, token = parse_img_or_link(char, step)
         if token:
             tokenlist.append(token)
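
parse_results() is the old parse_text() pared down: the inline img() closure gives way to the shared parse_img_or_link(), and emphasis markers are deliberately left uninterpreted inside results blocks (the plain-text accumulation below this hunk is unchanged context). A rough check, assuming eorg is importable:

from eorg.parser import parse_results

# Only the [[file:...]] syntax is tokenised; "*x*" stays literal text.
for tok in parse_results("*x* [[file:test.png]]"):
    print(tok.token, repr(tok.value))
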
@@ -232,6 +179,41 @@ def parse_text(txt):
     return tokenlist
 
 
+def parse_text(txt):
+    char = True
+    tokenlist = []
+
+    def append(value):
+        char, token = value
+        if token:
+            tokenlist.append(token)
+        return char
+
+    step = iter(txt)
+    while char is not None:
+        char = next(step, None)
+        char = append(emphasis(char, step, "*", tokens.BOLD))
+        char = append(emphasis(char, step, "/", tokens.ITALIC))
+        char = append(emphasis(char, step, "_", tokens.UNDERLINED))
+        char = append(emphasis(char, step, "=", tokens.VERBATIM))
+        char = append(emphasis(char, step, "~", "PRE"))
+        char = append(parse_img_or_link(char, step))
+
+        if not char:
+            continue
+
+        if len(tokenlist) == 0:
+            tokenlist.append(Token(tokens.TEXT, char))
+            continue
+
+        if tokenlist[-1].token != tokens.TEXT:
+            tokenlist.append(Token(tokens.TEXT, char))
+            continue
+
+        tokenlist[-1].value += char
+    return tokenlist
+
+
 def nextline(stream):
     line = next(stream)
     line = line.strip("\n")
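
The rewritten parse_text() threads every scanner through the local append() helper, which unpacks the (char, token) pairs that emphasis() and parse_img_or_link() now return: a consumed character becomes a token, an unconsumed one falls through to the plain-text accumulation at the bottom of the loop. A quick look at the output, assuming eorg is importable:

from eorg.parser import parse_text

# Expect roughly TEXT("some "), BOLD("bold"), TEXT(" text").
for tok in parse_text("some *bold* text"):
    print(tok.token, repr(tok.value))
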
@@ -261,4 +243,6 @@ def parse(stream):
 
     for item in doc.filter(tokens.LIST):
         item.value = parse_text(item.value)
+    for item in doc.filter(tokens.RESULTS):
+        item.value = parse_results(item.value)
     return doc
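
With these two filter passes, list items and results blocks each get a second, character-level tokenisation after the line-level parse. End to end, mirroring the new test below (assumes eorg is importable):

from io import StringIO
from eorg import tokens
from eorg.parser import parse

doc = parse(StringIO("\n#+RESULTS:\n[[file:test.png]]\n")).doc
assert doc[1].token == tokens.RESULTS
# doc[1].value now holds the parsed IMAGE/TEXT sub-tokens.
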
@@ -1,11 +1,8 @@
-import os
-import pytest
-import snippets
 from io import StringIO
 from eorg import tokens
 from eorg.tokens import Token
 from eorg.parser import parse
 from eorg.parser import parse_text
 from eorg.generate import html
 
 
@@ -16,14 +13,23 @@ def test_bullet_block():
 
 
 def test_render_results():
-    text = StringIO("""
+    text = StringIO(
+        """
 #+RESULTS:
 [[file:test.png]]
-""")
+"""
+    )
+    expected = [
+        Token(tokens.IMAGE, "['file:test.png', '']", attrs=None),
+        Token(tokens.TEXT, "\n"),
+    ]
     doc = parse(text).doc
     assert doc[0].token == tokens.BLANK
-    assert doc[1].value == ''
+    assert len(doc[1].value) == len(expected)
     assert doc[1].token == tokens.RESULTS
 
+    assert doc[1].value[0] == expected[0]
+    assert doc[1].value[1] == expected[1]
+
     htmlbody = html(doc).read()
-    assert htmlbody == '<img class="materialboxed center-align responsive-img" style="margin:auto;" src="file:test.png" alt="" />'
+    assert htmlbody == '<img src="test.png"/>\n'
@@ -55,6 +55,13 @@ def test_source():
     assert match is not None
 
 
+def test_results():
+    text = "#+RESULTS:"
+    rx = const.t_RESULTS_START
+    match = re.search(rx, text)
+    assert match is not None
+
+
 def test_bullets():
     # invalid if no language specified
     text = " + bullet 1"