Work on improving results handling.
continuous-integration/drone/push Build is failing Details

This commit is contained in:
Oly 2018-12-06 14:16:01 +00:00
parent ae9a504014
commit bca6ee01e6
6 changed files with 80 additions and 71 deletions

View File

@ -34,7 +34,7 @@ t_SRC_BEGIN = r"^\#\+BEGIN_SRC\s+"
t_SRC_END = r"^\#\+END_SRC" t_SRC_END = r"^\#\+END_SRC"
t_TABLE_START = r"^\s*\|" t_TABLE_START = r"^\s*\|"
t_TABLE_END = r"^(?!\s*\|).*$" t_TABLE_END = r"^(?!\s*\|).*$"
t_RESULTS_START = r"^\#\+RESULTS:" t_RESULTS_START = r"^\#\+RESULTS\:"
t_CAPTIONS = r"^\#\+CAPTION:" t_CAPTIONS = r"^\#\+CAPTION:"
t_NAME = r"^\#\+NAME:" t_NAME = r"^\#\+NAME:"
# t_IMG = r"^\[\[(\w|\.|-|_|/)+\]\]$" # t_IMG = r"^\[\[(\w|\.|-|_|/)+\]\]$"
@ -68,7 +68,7 @@ TOKENS = {
tokens.BULLET: TokenStruct( tokens.BULLET: TokenStruct(
start=t_BULLET_START, end=t_BULLET_END, start_pos=0 start=t_BULLET_START, end=t_BULLET_END, start_pos=0
), ),
tokens.RESULTS: TokenStruct(start=t_SRC_BEGIN, end=t_SRC_END), tokens.RESULTS: TokenStruct(start=t_RESULTS_START, end=t_RESULTS_END),
tokens.HEADER: TokenStruct(start=t_HEADER, start_pos=1, count=True), tokens.HEADER: TokenStruct(start=t_HEADER, start_pos=1, count=True),
tokens.META_OTHER: TokenStruct( tokens.META_OTHER: TokenStruct(
start=t_META_OTHER, start_pos=2, end_pos=-1 start=t_META_OTHER, start_pos=2, end_pos=-1

View File

@ -69,10 +69,10 @@ def parse_text_html(doc, token, cls="", root=True):
def results(doc, token, cls="", root=True): def results(doc, token, cls="", root=True):
if token.value.startswith('file:'): if token.value.startswith('[[file:'):
return "<img%s src=\"%s\"/>\n" % ( return "<img%s src=\"%s\"/>\n" % (
cls, cls,
escape(token.value).replace("\n", "<br />"), escape(token.value.strip()[7:-2]),
) )
return "<blockquote%s>%s</blockquote>\n" % ( return "<blockquote%s>%s</blockquote>\n" % (
@ -130,8 +130,8 @@ builddoc = {
tokens.TEXT: (parse_text_html, "flow-text"), tokens.TEXT: (parse_text_html, "flow-text"),
tokens.BULLET: (parse_bullets_html, "browser-default"), tokens.BULLET: (parse_bullets_html, "browser-default"),
tokens.SOURCE: (src, None), tokens.SOURCE: (src, None),
tokens.EXAMPLE: (results, None), tokens.EXAMPLE: (blockquote, None),
tokens.RESULTS: (blockquote, None), tokens.RESULTS: (results, None),
tokens.TABLE: (table, "responsive-table striped"), tokens.TABLE: (table, "responsive-table striped"),
} }

View File

@ -3,6 +3,18 @@ from eorg.tokens import Token
from eorg.const import ESCAPE, image_extensions from eorg.const import ESCAPE, image_extensions
def emphasis(char, step, end, tag):
    """Parse an inline emphasis span (e.g. *bold*) from a character stream.

    Parameters:
        char: the current character pulled from the stream (may be falsy
            at end of input).
        step: iterator yielding the remaining characters of the text.
        end: the delimiter character that both opens and closes the span.
        tag: the token type to emit for the collected span text.

    Returns:
        ``(char, None)`` unchanged when *char* is not the opening
        delimiter, otherwise ``(False, Token(tag, text))`` where *text*
        is everything up to (but not including) the closing delimiter,
        an escape character, or end of input.
    """
    if not char or char != end:
        return char, None
    # Hoist the loop-invariant stop set: the original rebuilt
    # [end] + ESCAPE on every iteration of the while loop.
    stop = [end] + ESCAPE
    char = next(step, None)
    r = ""
    while char and char not in stop:
        r += char
        char = next(step, None)
    # Returning False as the "current char" signals the caller that the
    # span (including its closing delimiter) has been consumed.
    return False, Token(tag, r)
def parse_img_or_link(char, step): def parse_img_or_link(char, step):
if char != "[": if char != "[":
return char, None return char, None

View File

@ -2,13 +2,9 @@ import re
from eorg import tokens from eorg import tokens
from eorg.tokens import Token from eorg.tokens import Token
from eorg.const import ( from eorg.const import (
TYPE_ATTRIBUTE, TYPE_ATTRIBUTE, TOKENS, METADATA, ESCAPE, image_extensions
TOKENS,
METADATA,
ESCAPE,
image_extensions,
) )
from eorg.helper import parse_img_or_link from eorg.helper import parse_img_or_link, emphasis
class Document: class Document:
@ -109,6 +105,7 @@ def parsebody(text, rx):
match = re.search(rx, text) match = re.search(rx, text)
if match: if match:
return False, None return False, None
return rx, text + "\n" return rx, text + "\n"
@ -119,7 +116,7 @@ def parseline(text, stream):
if not match: if not match:
continue continue
value = text[match.end() :] value = text[match.end():]
if token.type == TYPE_ATTRIBUTE: if token.type == TYPE_ATTRIBUTE:
b, t = parseline(next(stream), stream) b, t = parseline(next(stream), stream)
t.attrs = {token.key: value} t.attrs = {token.key: value}
@ -157,63 +154,13 @@ def parseline(text, stream):
return False, Token(token=tokens.LIST, value=text + " ") return False, Token(token=tokens.LIST, value=text + " ")
def parse_text(txt): def parse_results(txt):
char = True char = True
tokenlist = [] tokenlist = []
def img(char, step):
if char != "[":
return char
char = next(step, None)
if char != "[":
return char
char = next(step, None)
path = ""
while char not in ["]"] + ESCAPE:
path += char
char = next(step, None)
char = next(step, None)
alt = ""
if char == "[":
char = next(step, None)
while char not in ["]"] + ESCAPE:
alt += char
char = next(step, None)
char = next(step, None)
if path.endswith(image_extensions):
tokenlist.append(Token(tokens.IMAGE, [path, alt]))
return ""
tokenlist.append(Token(tokens.LINK, [path, alt]))
return ""
def emphasis(char, step, end, tag):
if not char or char != end:
return char
char = next(step, None)
r = ""
while char and char not in [end] + ESCAPE:
r += char
char = next(step, None)
tokenlist.append(Token(tag, r))
return ""
step = iter(txt) step = iter(txt)
while char is not None: while char is not None:
char = next(step, None) char = next(step, None)
char = emphasis(char, step, "*", tokens.BOLD)
char = emphasis(char, step, "/", tokens.ITALIC)
char = emphasis(char, step, "_", tokens.UNDERLINED)
char = emphasis(char, step, "=", tokens.VERBATIM)
char = emphasis(char, step, "~", "PRE")
# char = img(char, step)
char, token = parse_img_or_link(char, step) char, token = parse_img_or_link(char, step)
if token: if token:
tokenlist.append(token) tokenlist.append(token)
@ -232,6 +179,41 @@ def parse_text(txt):
return tokenlist return tokenlist
def parse_text(txt):
    """Tokenize inline markup in *txt*.

    Walks the text character by character, delegating emphasis spans
    (*bold*, /italic/, _underlined_, =verbatim=, ~pre~) and
    images/links to their parsers; any remaining characters are
    accumulated into TEXT tokens.

    Returns a list of Token objects.
    """
    tokenlist = []

    def consume(result):
        # Unpack a (char, token) pair; record the token if one was made.
        current, produced = result
        if produced:
            tokenlist.append(produced)
        return current

    # Delimiter -> token-type table, tried in this fixed order.
    markers = (
        ("*", tokens.BOLD),
        ("/", tokens.ITALIC),
        ("_", tokens.UNDERLINED),
        ("=", tokens.VERBATIM),
        ("~", "PRE"),
    )

    step = iter(txt)
    char = True
    while char is not None:
        char = next(step, None)
        for end, tag in markers:
            char = consume(emphasis(char, step, end, tag))
        char = consume(parse_img_or_link(char, step))
        if not char:
            # Char was swallowed by a sub-parser (or stream ended).
            continue
        if tokenlist and tokenlist[-1].token == tokens.TEXT:
            # Extend the current TEXT run rather than fragmenting it.
            tokenlist[-1].value += char
        else:
            tokenlist.append(Token(tokens.TEXT, char))
    return tokenlist
def nextline(stream): def nextline(stream):
line = next(stream) line = next(stream)
line = line.strip("\n") line = line.strip("\n")
@ -261,4 +243,6 @@ def parse(stream):
for item in doc.filter(tokens.LIST): for item in doc.filter(tokens.LIST):
item.value = parse_text(item.value) item.value = parse_text(item.value)
for item in doc.filter(tokens.RESULTS):
item.value = parse_results(item.value)
return doc return doc

View File

@ -1,11 +1,8 @@
import os
import pytest
import snippets import snippets
from io import StringIO from io import StringIO
from eorg import tokens from eorg import tokens
from eorg.tokens import Token from eorg.tokens import Token
from eorg.parser import parse from eorg.parser import parse
from eorg.parser import parse_text
from eorg.generate import html from eorg.generate import html
@ -16,14 +13,23 @@ def test_bullet_block():
def test_render_results(): def test_render_results():
text = StringIO(""" text = StringIO(
"""
#+RESULTS: #+RESULTS:
[[file:test.png]] [[file:test.png]]
""") """
)
expected = [
Token(tokens.IMAGE, "['file:test.png', '']", attrs=None),
Token(tokens.TEXT, "\n"),
]
doc = parse(text).doc doc = parse(text).doc
assert doc[0].token == tokens.BLANK assert doc[0].token == tokens.BLANK
assert doc[1].value == '' assert len(doc[1].value) == len(expected)
assert doc[1].token == tokens.RESULTS assert doc[1].token == tokens.RESULTS
assert doc[1].value[0] == expected[0]
assert doc[1].value[1] == expected[1]
htmlbody = html(doc).read() htmlbody = html(doc).read()
assert htmlbody == '<img class="materialboxed center-align responsive-img" style="margin:auto;" src="file:test.png" alt="" />' assert htmlbody == '<img src="test.png"/>\n'

View File

@ -55,6 +55,13 @@ def test_source():
assert match is not None assert match is not None
def test_results():
    # The RESULTS-start regex must match a bare results marker line.
    line = "#+RESULTS:"
    assert re.search(const.t_RESULTS_START, line) is not None
def test_bullets(): def test_bullets():
# invalid if no language specified # invalid if no language specified
text = " + bullet 1" text = " + bullet 1"