Compare commits
16 Commits
e212e38e8a
a6e26ea84f
cf783857ab
dad21c58f5
a792ac1009
e216e0a98a
9b61640ba5
66109c06fd
534bc31d2a
b2ba2897bd
8f6c241914
22e4e6b226
bca6ee01e6
ae9a504014
d21e391f81
81bf91532f
.drone.yml (29 changed lines)

@@ -7,16 +7,33 @@ steps:
   - env
   - python setup.py test
 
-- name: deploy
-  image: olymk2/drone-pypi
-  environment:
+# Auto build to pypi test
+- name: deploy-test
+  image: registry.gitlab.com/olymk2/drone-pypi
+  pull: true
+  settings:
     PYPI_USERNAME:
-      from_secret: PYPI_USERNAME
+      from_secret: PYPI_TEST_USERNAME
     PYPI_PASSWORD:
-      from_secret: PYPI_PASSWORD
+      from_secret: PYPI_TEST_PASSWORD
     PYPI_REPOSITORY:
-      from_secret: PYPI_REPOSITORY
+      from_secret: PYPI_TEST_REPOSITORY
   commands:
+  - env
+  - echo "__version__=$(date +'%y%m%d.%H%M')" > ./eorg/version.py
+  - python3 /bin/upload
+
+- name: deploy
+  image: registry.gitlab.com/olymk2/drone-pypi
+  pull: true
+  settings:
+    PYPI_USERNAME:
+      from_secret: PYPI_LIVE_USERNAME
+    PYPI_PASSWORD:
+      from_secret: PYPI_LIVE_PASSWORD
+    PYPI_REPOSITORY:
+      from_secret: PYPI_LIVE_REPOSITORY
+  commands:
   - env
   - echo "__version__=${DRONE_TAG}"
   - echo "__version__=${DRONE_TAG}" > eorg/version.py
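Net effect of this change: publishing is split into two pipeline steps. deploy-test pushes a timestamped build to the PyPI test index on every run, while deploy publishes the tagged release to the live index. A minimal Python sketch of the two version strings the steps write into eorg/version.py (the values shown are illustrative, not taken from a real build):

from datetime import datetime

# deploy-test: a date/time build number, e.g. __version__=190315.1042
test_version = datetime.now().strftime("%y%m%d.%H%M")

# deploy: the git tag that triggered the build (${DRONE_TAG}), e.g. 0.85
live_version = "0.85"  # illustrative value

print(f"__version__={test_version}")
print(f"__version__={live_version}")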
eorg/const.py

@@ -34,16 +34,16 @@ t_SRC_BEGIN = r"^\#\+BEGIN_SRC\s+"
 t_SRC_END = r"^\#\+END_SRC"
 t_TABLE_START = r"^\s*\|"
 t_TABLE_END = r"^(?!\s*\|).*$"
-t_RESULTS_START = r"^\#\+RESULTS:"
+t_RESULTS_START = r"^\#\+RESULTS\:"
 t_CAPTIONS = r"^\#\+CAPTION:"
 t_NAME = r"^\#\+NAME:"
 # t_IMG = r"^\[\[(\w|\.|-|_|/)+\]\]$"
 t_IMG = r"^\[\["
 t_IMG_END = r"\]\]"
-t_RESULTS_END = r"^\:..*"
+t_RESULTS_END = r"^\s*$"
 t_END_LABELS = r"^(?!\[|\#).*"
 t_BULLET_START = r"^\s*[\+|\-|0-9\.]"
-t_BULLET_END = r"^\s*(?![\+|\-|0-9]).*$"
+t_BULLET_END = r"^(?!\s*[\+|\-|0-9\.]).*$"
 
 t_HEADER = r"^\*+"
 t_META_OTHER = r"^[#]\+[A-Z\_]+\:"

@@ -68,7 +68,7 @@ TOKENS = {
     tokens.BULLET: TokenStruct(
         start=t_BULLET_START, end=t_BULLET_END, start_pos=0
     ),
-    tokens.RESULTS: TokenStruct(start=t_SRC_BEGIN, end=t_SRC_END),
+    tokens.RESULTS: TokenStruct(start=t_RESULTS_START, end=t_RESULTS_END),
     tokens.HEADER: TokenStruct(start=t_HEADER, start_pos=1, count=True),
     tokens.META_OTHER: TokenStruct(
         start=t_META_OTHER, start_pos=2, end_pos=-1
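The functional change above: a #+RESULTS: block now gets its own delimiters instead of reusing the SRC ones, ending at the first blank line, and t_BULLET_END's lookahead now accounts for leading whitespace. A standalone sketch checking the new patterns (values copied from the diff):

import re

t_RESULTS_START = r"^\#\+RESULTS\:"
t_RESULTS_END = r"^\s*$"                    # was r"^\:..*": block now ends at a blank line
t_BULLET_END = r"^(?!\s*[\+|\-|0-9\.]).*$"  # lookahead now skips leading whitespace

assert re.search(t_RESULTS_START, "#+RESULTS:")
assert re.search(t_RESULTS_END, "   ")                     # blank line closes the block
assert not re.search(t_BULLET_END, "  + indented bullet")  # still inside the list
assert re.search(t_BULLET_END, "plain text line")          # list ends here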
eorg/generate.py

@@ -1,12 +1,9 @@
 import re
 from html import escape
 from io import StringIO
-from eorg.const import Token, ESCAPE
 from eorg import tokens
-from eorg.tokens import Token
 from pygments import highlight
 from pygments.util import ClassNotFound
-from pygments.lexers import PythonLexer
 from pygments.lexers import get_lexer_by_name
 from pygments.formatters import HtmlFormatter
 

@@ -15,12 +12,11 @@ def src(doc, code, cls="", root=True):
     try:
         lexer = get_lexer_by_name(code.attrs.get("language", "shell"))
     except ClassNotFound as e:
-        lexer = get_lexer_by_name(code.attrs.get("language", "text"))
+        lexer = get_lexer_by_name("text")
     return highlight(code.value, lexer, HtmlFormatter(linenos=True))
 
 
 def img(doc, item, cls="", root=True):
-    caption = doc.previous(tokens.CAPTION)
     text = ""
     if item.attrs:
         caption = item.attrs.get('caption')

@@ -71,6 +67,19 @@ def parse_text_html(doc, token, cls="", root=True):
     return f"{token.value}"
 
 
+def results(doc, results, cls="", root=True):
+    result = ""
+    for token in results.value:
+        if token.token is tokens.IMAGE:
+            result += img(doc, token, cls, root=root)
+            return result
+        result += "<blockquote%s>%s</blockquote>\n" % (
+            cls,
+            escape(token.value).replace("\n", "<br />"),
+        )
+    return result
+
+
 def blockquote(doc, token, cls="", root=True):
     return "<blockquote%s>%s</blockquote>\n" % (
         cls,

@@ -121,7 +130,7 @@ builddoc = {
     tokens.BULLET: (parse_bullets_html, "browser-default"),
     tokens.SOURCE: (src, None),
     tokens.EXAMPLE: (blockquote, None),
-    tokens.RESULTS: (blockquote, None),
+    tokens.RESULTS: (results, None),
     tokens.TABLE: (table, "responsive-table striped"),
 }
 

@@ -147,7 +156,9 @@ def handle_token(doc, item, root=False):
 
 def html(doc):
     response = StringIO()
+    response.write('<div class="org-body">')
     for item in doc:
         response.write(handle_token(doc, item, True))
+    response.write('</div>')
     response.seek(0)
     return response
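In short, generate.py now routes RESULTS tokens through the new results() renderer (images via img(), anything else as a blockquote), and html() wraps its output in a div. A usage sketch, assuming the eorg package as it stands after this diff:

from io import StringIO

from eorg.parser import parse
from eorg.generate import html

doc = parse(StringIO("* Header\nSome *bold* text\n"))
# Output is now wrapped in <div class="org-body">...</div>
print(html(doc.doc).read())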
eorg/helper.py

@@ -3,6 +3,18 @@ from eorg.tokens import Token
 from eorg.const import ESCAPE, image_extensions
 
 
+def emphasis(char, step, end, tag):
+    if not char or char != end:
+        return char, None
+
+    char = next(step, None)
+    r = ""
+    while char and char not in [end] + ESCAPE:
+        r += char
+        char = next(step, None)
+    return False, Token(tag, r)
+
+
 def parse_img_or_link(char, step):
     if char != "[":
         return char, None

@@ -27,6 +39,8 @@ def parse_img_or_link(char, step):
     char = next(step, None)
     char = next(step, None)
 
+    if path.startswith('file:'):
+        path = path[5:]
     if path.endswith(image_extensions):
         return False, Token(tokens.IMAGE, [path, alt])
 
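emphasis() moves into helper.py with a new contract: instead of appending to a closed-over token list it returns a (char, token) pair, and parse_img_or_link() now strips Org's file: prefix before testing for an image extension. A small sketch of the new contract (assumes eorg.helper from this diff):

from eorg import tokens
from eorg.helper import emphasis

# The caller has already read the opening "*"; the iterator holds the rest.
step = iter("bold* trailing text")
char, token = emphasis("*", step, "*", tokens.BOLD)
assert char is False            # False signals the span was consumed
assert token.value == "bold"    # the emphasised text, without its markers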
eorg/parser.py (103 changed lines)

@@ -2,13 +2,9 @@ import re
 from eorg import tokens
 from eorg.tokens import Token
 from eorg.const import (
-    TYPE_ATTRIBUTE,
-    TOKENS,
-    METADATA,
-    ESCAPE,
-    image_extensions,
+    TYPE_ATTRIBUTE, TOKENS, METADATA, ESCAPE, image_extensions
 )
-from eorg.helper import parse_img_or_link
+from eorg.helper import parse_img_or_link, emphasis
 
 
 class Document:

@@ -79,7 +75,7 @@ class Document:
                 yield item
                 continue
 
-            if item.token != tokens.LIST:
+            if item.token != tokens.LIST and item.token != tokens.RESULTS:
                 continue
 
             if isinstance(item.value, list):

@@ -109,18 +105,18 @@ def parsebody(text, rx):
     match = re.search(rx, text)
     if match:
         return False, None
 
     return rx, text + "\n"
 
 
 def parseline(text, stream):
     attrs = None
     for key, token in TOKENS.items():
-        print(token)
         match = re.search(token.start, text)
         if not match:
             continue
 
-        value = text[match.end() :]
+        value = text[match.end():]
         if token.type == TYPE_ATTRIBUTE:
             b, t = parseline(next(stream), stream)
             t.attrs = {token.key: value}

@@ -158,63 +154,13 @@ def parseline(text, stream):
     return False, Token(token=tokens.LIST, value=text + " ")
 
 
-def parse_text(txt):
+def parse_results(txt):
     char = True
     tokenlist = []
 
-    def img(char, step):
-        if char != "[":
-            return char
-
-        char = next(step, None)
-
-        if char != "[":
-            return char
-
-        char = next(step, None)
-
-        path = ""
-        while char not in ["]"] + ESCAPE:
-            path += char
-            char = next(step, None)
-        char = next(step, None)
-
-        alt = ""
-        if char == "[":
-            char = next(step, None)
-            while char not in ["]"] + ESCAPE:
-                alt += char
-                char = next(step, None)
-            char = next(step, None)
-
-        if path.endswith(image_extensions):
-            tokenlist.append(Token(tokens.IMAGE, [path, alt]))
-            return ""
-
-        tokenlist.append(Token(tokens.LINK, [path, alt]))
-        return ""
-
-    def emphasis(char, step, end, tag):
-        if not char or char != end:
-            return char
-
-        char = next(step, None)
-        r = ""
-        while char and char not in [end] + ESCAPE:
-            r += char
-            char = next(step, None)
-        tokenlist.append(Token(tag, r))
-        return ""
-
     step = iter(txt)
     while char is not None:
         char = next(step, None)
-        char = emphasis(char, step, "*", tokens.BOLD)
-        char = emphasis(char, step, "/", tokens.ITALIC)
-        char = emphasis(char, step, "_", tokens.UNDERLINED)
-        char = emphasis(char, step, "=", tokens.VERBATIM)
-        char = emphasis(char, step, "~", "PRE")
-        # char = img(char, step)
         char, token = parse_img_or_link(char, step)
         if token:
             tokenlist.append(token)

@@ -233,6 +179,41 @@ def parse_text(txt):
     return tokenlist
 
 
+def parse_text(txt):
+    char = True
+    tokenlist = []
+
+    def append(value):
+        char, token = value
+        if token:
+            tokenlist.append(token)
+        return char
+
+    step = iter(txt)
+    while char is not None:
+        char = next(step, None)
+        char = append(emphasis(char, step, "*", tokens.BOLD))
+        char = append(emphasis(char, step, "/", tokens.ITALIC))
+        char = append(emphasis(char, step, "_", tokens.UNDERLINED))
+        char = append(emphasis(char, step, "=", tokens.VERBATIM))
+        char = append(emphasis(char, step, "~", "PRE"))
+        char = append(parse_img_or_link(char, step))
+
+        if not char:
+            continue
+
+        if len(tokenlist) == 0:
+            tokenlist.append(Token(tokens.TEXT, char))
+            continue
+
+        if tokenlist[-1].token != tokens.TEXT:
+            tokenlist.append(Token(tokens.TEXT, char))
+            continue
+
+        tokenlist[-1].value += char
+    return tokenlist
+
+
 def nextline(stream):
     line = next(stream)
     line = line.strip("\n")

@@ -262,4 +243,6 @@ def parse(stream):
 
     for item in doc.filter(tokens.LIST):
         item.value = parse_text(item.value)
+    for item in doc.filter(tokens.RESULTS):
+        item.value = parse_results(item.value)
     return doc
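parse_text() is rebuilt on the shared helpers: each emphasis marker and image/link is delegated through append(), and any leftover character folds into a running TEXT token, while RESULTS bodies now get their own lighter pass via parse_results(). A usage sketch, assuming eorg.parser from this diff:

from eorg import tokens
from eorg.parser import parse_text

toks = parse_text("plain *bold* tail")
assert toks[0].token == tokens.TEXT and toks[0].value == "plain "
assert toks[1].token == tokens.BOLD and toks[1].value == "bold"
assert toks[2].token == tokens.TEXT and toks[2].value == " tail"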
eorg/version.py

@@ -1 +1 @@
-__version__=0.81
+__version__=0.85
@@ -32,3 +32,17 @@ def test_block_settings():
     assert result[1].value == expected[1].value
     assert result[1].attrs == expected[1].attrs
     #assert result == expected
+
+
+def test_export_settings():
+    document = StringIO("""
+#+TITLE: Tests
+#+BEGIN_SRC shell :exports results
+elcato create --path=/tmp/myblog
+#+END_SRC
+
+#+RESULTS:
+result block here
+""")
+    result = parse(document).doc
+    assert result[2].attrs.get('exports') == ['results']
@@ -200,3 +200,50 @@ def test_bullet_block():
     assert result[0].value == expected[0].value
     assert result[1].token == tokens.BULLET
     assert result[1].value == expected[1].value
+
+    text = StringIO(
+        """
+- Bullet 1
+- Bullet 2
+ -Bullet 3"""
+    )
+
+    expected = [
+        Token(tokens.BLANK, ""),
+        Token(tokens.BULLET, """- Bullet 1\n- Bullet 2\n -Bullet 3\n"""),
+    ]
+    result = parse(text).doc
+    assert result[0].token == tokens.BLANK
+    assert result[0].value == expected[0].value
+    assert result[1].token == tokens.BULLET
+    assert result[1].value == expected[1].value
+
+
+@pytest.mark.skip
+def test_src_block_images():
+    text = StringIO(
+        """
+#+BEGIN_SRC latex :exports results :file test.png :results raw file
+\begin{equation}
+x=\sqrt{b}
+\end{equation}
+#+END_SRC
+
+#+RESULTS:
+[[file:test.png]]
+"""
+    )
+    expected = [
+        Token(tokens.BLANK, ""),
+        Token(
+            tokens.SOURCE, """\begin{equation}\nx=\sqrt{b}\n\end{equation}"""
+        ),
+        Token(tokens.BLANK, ""),
+    ]
+    result = parse(text).doc
+    assert result[0].token == tokens.BLANK
+    assert result[0].value == expected[0].value
+    assert result[1].token == tokens.SOURCE
+    assert result[2].token == tokens.BLANK
+    assert result[3].value == expected[0].value
+    assert result[3].token == tokens.RESULTS
@@ -24,7 +24,7 @@ def test_html_output():
     print(htmlbody)
     assert (
         htmlbody
-        == """<h2> Header 1</h2>
+        == """<div class="org-body"><h2> Header 1</h2>
 <h3> Sub Header 1</h3>
 <p class="flow-text">body <code>text</code>
 over multiple <b>lines</b>

@@ -34,5 +34,5 @@ def test_html_output():
 </pre></div>
 </td></tr></table><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span></span><span class="p">(</span><span class="nv">test</span> <span class="nv">code</span><span class="p">)</span>
 </pre></div>
-</td></tr></table>"""
+</td></tr></table></div>"""
     )
@@ -1,15 +1,36 @@
-import os
-import pytest
 import snippets
 from io import StringIO
 from eorg import tokens
 from eorg.tokens import Token
 from eorg.parser import parse
-from eorg.parser import parse_text
 from eorg.generate import html
 
 
 def test_bullet_block():
-    expected = """<ul class="browser-default"><li class="collection-item">Bullet 1</li><li class="collection-item">Bullet 2</li></ul>"""
+    expected = """<div class="org-body"><ul class="browser-default"><li class="collection-item">Bullet 1</li><li class="collection-item">Bullet 2</li></ul></div>"""
     result = html(parse(snippets.bullet_plus_snippet).doc)
     assert result.read() == expected
+
+
+def test_render_results():
+    text = StringIO(
+        """
+#+RESULTS:
+[[file:test.png]]
+"""
+    )
+    expected = [
+        Token(tokens.IMAGE, ['test.png', ''], attrs=None),
+        Token(tokens.TEXT, "\n"),
+    ]
+    doc = parse(text)
+    tags = doc.doc
+    assert tags[0].token == tokens.BLANK
+    assert len(tags[1].value) == len(expected)
+    assert tags[1].token == tokens.RESULTS
+
+    assert tags[1].value[0].value == expected[0].value
+    assert tags[1].value[1].value == expected[1].value
+
+    htmlbody = html(tags).read()
+    assert htmlbody == '<div class="org-body"><img style="margin:auto;" src="test.png" alt="" /></div>'
@@ -55,6 +55,13 @@ def test_source():
     assert match is not None
 
 
+def test_results():
+    text = "#+RESULTS:"
+    rx = const.t_RESULTS_START
+    match = re.search(rx, text)
+    assert match is not None
+
+
 def test_bullets():
     # invalid if no language specified
     text = " + bullet 1"