From 9ffd78ca089d34e5f66e65b12a20821fbf8e47ad Mon Sep 17 00:00:00 2001
From: Oly
Date: Wed, 17 Oct 2018 14:32:17 +0100
Subject: [PATCH] Start of simple html generator

---
 eorg/const.py               |  4 ++--
 eorg/generate.py            | 32 +++++++++++++++++++++++++++++
 eorg/html.py                | 41 +++++++++++++++++++++++++++++++++++++
 eorg/parser.py              |  5 ++++-
 examples/raw/html-output.py | 10 +++++++++
 5 files changed, 89 insertions(+), 3 deletions(-)
 create mode 100644 eorg/generate.py
 create mode 100644 eorg/html.py
 create mode 100644 examples/raw/html-output.py

diff --git a/eorg/const.py b/eorg/const.py
index 8053317..55fa5a5 100755
--- a/eorg/const.py
+++ b/eorg/const.py
@@ -1,11 +1,11 @@
-
+t_BLANK_LINE = '^\s*$'
 METADATA = ['TITLE', 'AUTHOR', 'EMAIL', 'DESCRIPTION', 'KEYWORDS']
 t_META = r"^[#]\+(" + '|'.join(METADATA) +")\:"
 
 t_COMMENT_BEGIN = r"^\#\+BEGIN_COMMENT"
 t_COMMENT_END = r"^\#\+END_COMMENT"
 t_SRC_BEGIN = r"^\#\+BEGIN_SRC\s+"
-t_SRC_END = r"[#]\+END_SRC\s+$"
+t_SRC_END = r"^\#\+END_SRC"
 t_HEADER = r"^\*+"
 
 
diff --git a/eorg/generate.py b/eorg/generate.py
new file mode 100644
index 0000000..d141c5d
--- /dev/null
+++ b/eorg/generate.py
@@ -0,0 +1,32 @@
+from io import StringIO
+from pygments import highlight
+from pygments.lexers import PythonLexer
+from pygments.lexers import get_lexer_by_name
+from pygments.formatters import HtmlFormatter
+
+def src(code):
+    lexer = get_lexer_by_name('lisp')
+    return highlight(code, lexer, HtmlFormatter())
+
+builddoc ={
+    "HEADER1": "h2",
+    "HEADER2": "h3",
+    "HEADER3": "h4",
+    "BREAK": "br",
+    "TEXT": "p",
+    "SRC_BEGIN": src,
+}
+
+def html(doc):
+    response = StringIO()
+    for item in doc:
+        tag = builddoc.get(item.token)
+        if not tag:
+            continue
+        if callable(tag):
+            response.write(tag(item.value))
+            continue
+        else:
+            response.write('<%s>%s</%s>\n' % (tag, item.value, tag))
+    response.seek(0)
+    return response
diff --git a/eorg/html.py b/eorg/html.py
new file mode 100644
index 0000000..bc621b6
--- /dev/null
+++ b/eorg/html.py
@@ -0,0 +1,41 @@
+from io import StringIO
+from pygments import highlight
+from pygments.lexers import PythonLexer
+from pygments.lexers import get_lexer_by_name
+from pygments.formatters import HtmlFormatter
+
+code = 'print "Hello World"'
+def src(code):
+    lexer = get_lexer_by_name('lisp')
+    return highlight(code, lexer, HtmlFormatter())
+
+
+builddoc ={
+#    "TITLE": "h1",
+#    "EMAIL": "h1",
+#    "AUTHOR": "h1",
+    "HEADER1": "h2",
+    "HEADER2": "h3",
+    "HEADER3": "h4",
+    "BREAK": "br",
+    "TEXT": "p",
+    "SRC_BEGIN": src,
+#    "COMMENT": "pre",
+}
+
+
+
+def generate(doc):
+    response = StringIO()
+    for item in doc:
+        print(item)
+        tag = builddoc.get(item.token)
+        if not tag:
+            continue
+        if callable(tag):
+            response.write(tag(item.value))
+            continue
+        else:
+            response.write('<%s>%s</%s>\n' % (tag, item.value, tag))
+    response.seek(0)
+    return response
diff --git a/eorg/parser.py b/eorg/parser.py
index 8d7ac86..97d9d6c 100644
--- a/eorg/parser.py
+++ b/eorg/parser.py
@@ -75,7 +75,10 @@ def parseline(text):
             Token(token=match.group(0)[s:e], value=text[match.end() :]),
         )
         return block, Token(token=key, value=text[match.end() :])
-    return False, Token(token='TEXT', value=text)
+    text = text.strip()
+    if text == '':
+        return False, Token(token='BREAK', value=text)
+    return '^\s*$', Token(token='TEXT', value=text + ' ')
 
 
 def parse(stream):
diff --git a/examples/raw/html-output.py b/examples/raw/html-output.py
new file mode 100644
index 0000000..26f96bb
--- /dev/null
+++ b/examples/raw/html-output.py
@@ -0,0 +1,10 @@
+import os
+from eorg.parser import parse
+from eorg.html import generate
+
+doc=[]
+with open(os.path.abspath('../../tests/fixtures/test.org'), 'r') as fp:
+    doc = parse(fp)
+
+print('#################')
+print(generate(doc).read())
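
The builddoc table added in eorg/generate.py dispatches on token type: a string value wraps the token's text in that HTML tag, while a callable (such as src, which highlights the value with Pygments) returns its own markup. Below is a minimal usage sketch, assuming the modules added by this patch; the QUOTE token, the blockquote handler, the HEADER4 mapping, and the fixture path are illustrative only, not part of the patch.

from eorg.parser import parse
from eorg.generate import builddoc, html


def blockquote(value):
    # Callable handlers receive the token value and return raw HTML.
    return '<blockquote>%s</blockquote>\n' % value


# Hypothetical extensions: a plain tag mapping and a callable handler.
builddoc["HEADER4"] = "h5"      # emitted as <h5>...</h5>
builddoc["QUOTE"] = blockquote  # emitted via blockquote(item.value)

# Path relative to the repository root; adjust for your checkout.
with open('tests/fixtures/test.org') as fp:
    doc = parse(fp)

print(html(doc).read())

Tokens with no entry fall through builddoc.get and are skipped, so extra mappings are optional.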