Diffstat (limited to 'megapixels/app/site/parser.py')
 megapixels/app/site/parser.py | 36 ++++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index d78cc402..40d9c7f6 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -11,6 +11,10 @@ renderer = mistune.Renderer(escape=False)
 markdown = mistune.Markdown(renderer=renderer)
 
 def fix_images(lines, s3_path):
+    """
+    Do our own transformation of the markdown around images, to handle wide images etc.
+    lines: markdown lines
+    """
     real_lines = []
     block = "\n\n".join(lines)
     for line in block.split("\n"):
@@ -29,6 +33,9 @@ def fix_images(lines, s3_path):
     return "\n".join(real_lines)
 
 def format_section(lines, s3_path, type=''):
+    """
+    Format a normal markdown section.
+    """
     if len(lines):
         lines = fix_images(lines, s3_path)
         if type:
@@ -38,13 +45,16 @@ def format_section(lines, s3_path, type=''):
     return ""
 
 def format_metadata(section):
+    """
+    Format a metadata section ('+ key: value' pairs).
+    """
     meta = []
     for line in section.split('\n'):
         key, value = line[2:].split(': ', 1)
         meta.append("<div><div class='gray'>{}</div><div>{}</div></div>".format(key, value))
     return "<section><div class='meta'>{}</div></section>".format(''.join(meta))
 
-def format_applet(section):
+def format_applet(section, s3_path):
     payload = section.replace('```', '').strip().split('\n')
     applet = {}
     if ': ' in payload[0]:
@@ -56,10 +66,15 @@ def format_applet(section):
         if opt:
             applet['opt'] = opt
         if command == 'load file':
+            if opt[0] != '/':
+                applet['opt'] = s3_path + opt
             applet['fields'] = payload[1]
     return "<section><div class='applet' data-payload='{}'></div></section>".format(json.dumps(applet))
 
 def parse_markdown(sections, s3_path, skip_h1=False):
+    """
+    Parse the page into sections and preprocess the markdown to handle our modifications.
+    """
     groups = []
     current_group = []
     for section in sections:
@@ -67,7 +82,7 @@ def parse_markdown(sections, s3_path, skip_h1=False):
             continue
         elif section.startswith('```'):
             groups.append(format_section(current_group, s3_path))
-            groups.append(format_applet(section))
+            groups.append(format_applet(section, s3_path))
             current_group = []
         elif section.startswith('+ '):
             groups.append(format_section(current_group, s3_path))
@@ -88,6 +103,9 @@ def parse_markdown(sections, s3_path, skip_h1=False):
     return content
 
 def parse_research_index(research_posts):
+    """
+    Generate an index file for the research pages.
+    """
     content = "<div class='research_index'>"
     for post in research_posts:
         s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, post['path'])
@@ -105,6 +123,9 @@ def parse_research_index(research_posts):
     return content
 
 def read_metadata(fn):
+    """
+    Read in a markdown file and extract its metadata.
+    """
     with open(fn, "r") as file:
         data = file.read()
         data = data.replace("\n ", "\n")
@@ -128,6 +149,9 @@ default_metadata = {
 }
 
 def parse_metadata_section(metadata, section):
+    """
+    Parse the 'key: value' pairs of a metadata section.
+    """
     for line in section.split("\n"):
         if ': ' not in line:
             continue
@@ -135,6 +159,11 @@ def parse_metadata_section(metadata, section):
         metadata[key.lower()] = value
 
 def parse_metadata(fn, sections):
+    """
+    Parse the metadata headers in a markdown file
+    (everything before the second --------- separator).
+    Also generates the appropriate URLs for this page :)
+    """
     found_meta = False
     metadata = {}
     valid_sections = []
@@ -175,6 +204,9 @@ def parse_metadata(fn, sections):
     return metadata, valid_sections
 
 def read_research_post_index():
+    """
+    Generate an index of the research (blog) posts.
+    """
     posts = []
     for fn in sorted(glob.glob('../site/content/research/*/index.md')):
         metadata, valid_sections = read_metadata(fn)
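
For context, a minimal sketch (not from the repository) of what the new s3_path argument to format_applet does for 'load file' applets: relative file arguments are prefixed with the post's S3 path, absolute ones are left alone. The bucket URL and file name below are hypothetical, and s3_path is assumed to end with a trailing slash.

# Sketch only, mirroring the two added lines in format_applet; names below are made up.
s3_path = "https://example-bucket.s3.amazonaws.com/site/research/example-post/"  # hypothetical
opt = "data.csv"                      # relative applet argument from the markdown block
applet = {'command': 'load file', 'opt': opt}
if opt[0] != '/':                     # relative path -> prefix with the post's S3 path
    applet['opt'] = s3_path + opt
# applet['opt'] is now ".../site/research/example-post/data.csv";
# an opt starting with '/' would be kept as-is.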