summaryrefslogtreecommitdiff
path: root/builder
diff options
context:
space:
mode:
Diffstat (limited to 'builder')
-rw-r--r--builder/README.md21
-rw-r--r--builder/__init__.py0
-rw-r--r--builder/builder.py90
-rw-r--r--builder/parser.py172
-rw-r--r--builder/paths.py6
-rw-r--r--builder/s3.py61
6 files changed, 350 insertions, 0 deletions
diff --git a/builder/README.md b/builder/README.md
new file mode 100644
index 00000000..1a6d3a1e
--- /dev/null
+++ b/builder/README.md
@@ -0,0 +1,21 @@
+Megapixels Static Site Generator
+================================
+
+The index, blog, about, and other pages are built using this static site generator.
+
+## Metadata
+
+```
+status: published|draft|private
+title: From 1 to 100 Pixels
+desc: High resolution insights from low resolution imagery
+slug: from-1-to-100-pixels
+published: 2018-12-04
+updated: 2018-12-04
+authors: Adam Harvey, Berit Gilma, Matthew Stender
+```
+
+## S3 Assets
+
+Static assets: `v1/site/about/assets/picture.jpg`
+Dataset assets: `v1/datasets/lfw/assets/picture.jpg`
diff --git a/builder/__init__.py b/builder/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/builder/__init__.py
diff --git a/builder/builder.py b/builder/builder.py
new file mode 100644
index 00000000..620fc710
--- /dev/null
+++ b/builder/builder.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+from dotenv import load_dotenv
+load_dotenv()
+
+import os
+import glob
+from jinja2 import Environment, FileSystemLoader, select_autoescape
+
+import s3
+import parser
+from paths import *
+
# Jinja2 environment rooted at the site's template directory.
# Autoescaping is disabled for all templates (empty extension list) because
# `content` is pre-rendered HTML that must not be escaped a second time.
env = Environment(
    loader=FileSystemLoader(template_path),
    autoescape=select_autoescape([])
)
+
def build_page(fn, research_posts):
    """Render one markdown content file to <public_path>/<url>/index.html.

    fn             -- path to the markdown source file
    research_posts -- list of research-post metadata dicts; never empty
                      (read_research_post_index() inserts a placeholder)
    """
    metadata, sections = parser.read_metadata(fn)

    # Safety guard; read_metadata currently always returns a dict.
    if metadata is None:
        print("{} has no metadata".format(fn))
        return

    print(metadata['url'])

    dirname = os.path.dirname(fn)
    # metadata['url'] starts with '/', so plain concatenation is intentional:
    # os.path.join() would discard public_path before an absolute component.
    output_path = public_path + metadata['url']
    output_fn = os.path.join(output_path, "index.html")

    # Research templates render the title themselves, so the markdown H1
    # would duplicate it.
    skip_h1 = False

    if metadata['url'] == '/':
        template = env.get_template("home.html")
    elif 'research/' in fn:
        skip_h1 = True
        template = env.get_template("research.html")
    else:
        template = env.get_template("page.html")

    # Dataset pages already carry "datasets/" in their path, so they use the
    # shorter S3 prefix (see paths.py).
    if 'datasets/' in fn:
        s3_dir = s3_datasets_path
    else:
        s3_dir = s3_site_path

    s3_path = s3.make_s3_path(s3_dir, metadata['path'])

    # Only directory indexes own an assets/ folder worth syncing.
    if 'index.md' in fn:
        s3.sync_directory(dirname, s3_dir, metadata)

    content = parser.parse_markdown(sections, s3_path, skip_h1=skip_h1)

    html = template.render(
        metadata=metadata,
        content=content,
        research_posts=research_posts,
        latest_research_post=research_posts[-1],
    )

    os.makedirs(output_path, exist_ok=True)
    # Explicit UTF-8 so output does not depend on the platform's default
    # locale encoding.
    with open(output_fn, "w", encoding="utf-8") as file:
        file.write(html)

    print("______")
+
def build_research_index(research_posts):
    """Render the research landing page: intro markdown plus the post index."""
    # Use the shared content_path constant (paths.py) instead of a
    # hard-coded '../site/content' so the location is defined in one place.
    metadata, sections = parser.read_metadata(
        os.path.join(content_path, 'research/index.md'))
    template = env.get_template("page.html")
    s3_path = s3.make_s3_path(s3_site_path, metadata['path'])
    content = parser.parse_markdown(sections, s3_path, skip_h1=False)
    content += parser.parse_research_index(research_posts)
    html = template.render(
        metadata=metadata,
        content=content,
        research_posts=research_posts,
        latest_research_post=research_posts[-1],
    )
    output_path = public_path + '/research'
    # Create the output directory; build_page only does this for the pages
    # it renders itself, so this function must not rely on it.
    os.makedirs(output_path, exist_ok=True)
    with open(os.path.join(output_path, 'index.html'), "w", encoding="utf-8") as file:
        file.write(html)
+
def build_site():
    """Build every markdown page under content_path, then the research index."""
    posts = parser.read_research_post_index()
    pattern = os.path.join(content_path, "**/*.md")
    for markdown_fn in glob.iglob(pattern, recursive=True):
        build_page(markdown_fn, posts)
    build_research_index(posts)
+
+if __name__ == '__main__':
+ build_site()
diff --git a/builder/parser.py b/builder/parser.py
new file mode 100644
index 00000000..dd3643bf
--- /dev/null
+++ b/builder/parser.py
@@ -0,0 +1,172 @@
+import os
+import re
+import glob
+import mistune
+
+import s3
+from paths import *
+
# Shared mistune renderer; escape=False because sections may embed raw HTML
# (image divs, metadata tables) that must pass through untouched.
renderer = mistune.Renderer(escape=False)
markdown = mistune.Markdown(renderer=renderer)
+
def fix_images(lines, s3_path):
    """Rewrite markdown image syntax into HTML <div class='image'> blocks.

    lines   -- list of markdown section strings (joined, then processed line
               by line)
    s3_path -- URL prefix prepended to every image path
    Returns the processed text as one newline-joined string; lines without
    an image pass through unchanged.
    """
    real_lines = []
    block = "\n\n".join(lines)
    for line in block.split("\n"):
        if "![" in line:
            # Expected shape: ![alt](url) or ![wide:caption](url); a line
            # missing '](' or ')' raises ValueError.
            line = line.replace('![', '')
            alt_text, tail = line.split('](', 1)
            url, tail = tail.split(')', 1)
            if ':' in alt_text:
                # "wide:caption" -> drop the type prefix, keep the caption.
                tail, alt_text = alt_text.split(':', 1)
            # Single quotes are stripped from alt text to keep the
            # single-quoted attribute well-formed.
            img_tag = "<img src='{}' alt='{}'>".format(
                s3_path + url, alt_text.replace("'", ""))
            if len(alt_text):
                line = "<div class='image'>{}<div class='caption'>{}</div></div>".format(
                    img_tag, alt_text)
            else:
                # No caption: image div only. (Removed a stray, unused
                # second argument that was passed to format() here.)
                line = "<div class='image'>{}</div>".format(img_tag)
        real_lines.append(line)
    return "\n".join(real_lines)
+
def format_section(lines, s3_path, type=''):
    """Render a group of markdown sections into one <section> element.

    An optional CSS class (`type`) is attached; empty input yields ''.
    """
    if not lines:
        return ""
    body = markdown(fix_images(lines, s3_path))
    if type:
        return "<section class='{}'>{}</section>".format(type, body)
    return "<section>{}</section>".format(body)
+
def format_metadata(section):
    """Convert a '+ key: value' metadata section into an HTML key/value list."""
    meta = []
    for line in section.split('\n'):
        # Skip malformed lines instead of crashing on the unpack below
        # (same guard as parse_metadata_section).
        if ': ' not in line:
            continue
        # line[2:] strips the leading '+ ' marker.
        key, value = line[2:].split(': ', 1)
        meta.append("<div><div class='gray'>{}</div><div>{}</div></div>".format(key, value))
    return "<section><div class='meta'>{}</div></section>".format(''.join(meta))
+
def parse_markdown(sections, s3_path, skip_h1=False):
    """Assemble markdown sections into HTML.

    Plain sections accumulate and are flushed as one <section> whenever a
    special section (metadata block, wide image, plain image) appears; any
    trailing accumulation is flushed at the end.
    """
    rendered = []
    pending = []
    for section in sections:
        if skip_h1 and section.startswith('# '):
            continue
        if section.startswith('+ '):
            special = format_metadata(section)
        elif '![wide:' in section:
            special = format_section([section], s3_path, type='wide')
        elif '![' in section:
            special = format_section([section], s3_path, type='images')
        else:
            pending.append(section)
            continue
        # Flush accumulated plain sections ahead of the special one.
        rendered.append(format_section(pending, s3_path))
        rendered.append(special)
        pending = []
    rendered.append(format_section(pending, s3_path))
    return "".join(rendered)
+
def parse_research_index(research_posts):
    """Build the HTML listing of research posts (image + title + tagline cards)."""
    # 1x1 transparent GIF placeholder for posts without a preview image.
    blank_gif = ('data:image/gif;base64,'
                 'R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==')
    rows = []
    for post in research_posts:
        asset_base = s3.make_s3_path(s3_site_path, post['path'])
        image_url = asset_base + post['image'] if 'image' in post else blank_gif
        rows.append(
            "<a href='{}'><section class='wide'>"
            "<img src='{}' alt='Research post' />"
            "<section><h1>{}</h1><h2>{}</h2></section>"
            "</section></a>".format(
                post['path'], image_url, post['title'], post['tagline']))
    return "<div class='research_index'>{}</div>".format(''.join(rows))
+
def read_metadata(fn):
    """Read markdown file *fn*, normalize line endings, split it into
    blank-line-separated sections, and return (metadata, sections).
    """
    # Explicit UTF-8: content files should not depend on the platform's
    # default locale encoding.
    with open(fn, "r", encoding="utf-8") as file:
        data = file.read()
    # Drop a single leading space on any line (soft-indented continuations).
    data = data.replace("\n ", "\n")
    if "\n" in data:
        # CRLF file: strip the carriage returns.
        data = data.replace("\r", "")
    else:
        # CR-only (classic Mac) file: treat CR as the line separator.
        data = data.replace("\r", "\n")
    sections = data.split("\n\n")
    return parse_metadata(fn, sections)
+
# Fallback values merged into every page's metadata for keys the markdown
# header omits (see parse_metadata).
default_metadata = {
    'status': 'published',
    'title': 'Untitled Page',
    'desc': '',
    'slug': '',
    'published': '2018-12-31',
    'updated': '2018-12-31',
    'authors': 'Adam Harvey',
    'sync': 'true',
    'tagline': '',
}
+
def parse_metadata_section(metadata, section):
    """Parse 'Key: value' lines of *section* into *metadata* (keys lowercased).

    Lines without a ': ' separator are ignored. Mutates *metadata* in place.
    """
    for entry in section.split("\n"):
        if ': ' in entry:
            key, _, value = entry.partition(': ')
            metadata[key.lower()] = value
+
def parse_metadata(fn, sections):
    """Extract page metadata from *sections* and compute derived fields.

    Returns (metadata, valid_sections), where valid_sections are the content
    sections following the metadata header. Sections containing '-----'
    (horizontal-rule fences) are dropped.
    """
    found_meta = False
    metadata = {}
    valid_sections = []
    for section in sections:
        # The first section containing ': ' is treated as the metadata header.
        if not found_meta and ': ' in section:
            found_meta = True
            parse_metadata_section(metadata, section)
            continue
        if '-----' in section:
            continue
        if found_meta:
            valid_sections.append(section)

    if 'title' not in metadata:
        print('warning: {} has no title'.format(fn))
    for key in default_metadata:
        if key not in metadata:
            metadata[key] = default_metadata[key]

    # Derive the site-relative path/url from the location under content_path.
    basedir = os.path.dirname(fn.replace(content_path, ''))
    basename = os.path.basename(fn)
    if basedir == '/':
        metadata['path'] = '/'
        metadata['url'] = '/'
    elif basename == 'index.md':
        metadata['path'] = basedir + '/'
        metadata['url'] = metadata['path']
    else:
        metadata['path'] = basedir + '/'
        metadata['url'] = metadata['path'] + basename.replace('.md', '') + '/'

    # A file still carrying the documented placeholder value (README) is
    # treated as published.
    if metadata['status'] == 'published|draft|private':
        metadata['status'] = 'published'

    # 'sync' is truthy unless explicitly the string 'false'.
    metadata['sync'] = metadata['sync'] != 'false'

    # Strip whitespace around comma-separated author names so the rendered
    # HTML does not carry a stray leading space after each <br>
    # (was: plain split(',')).
    metadata['author_html'] = '<br>'.join(
        name.strip() for name in metadata['authors'].split(','))
    return metadata, valid_sections
+
def read_research_post_index():
    """Collect metadata for all publishable research posts, sorted by path.

    Returns a placeholder entry when none exist so callers can rely on a
    non-empty list (e.g. research_posts[-1] in builder.py).
    """
    posts = []
    # Use the shared content_path constant instead of a duplicated
    # hard-coded '../site/content' prefix.
    pattern = os.path.join(content_path, 'research/*/index.md')
    for fn in sorted(glob.glob(pattern)):
        metadata, valid_sections = read_metadata(fn)
        # Drafts and private posts are excluded from the public index.
        if metadata is None or metadata['status'] in ('private', 'draft'):
            continue
        posts.append(metadata)
    if not posts:
        posts.append({
            'title': 'Placeholder',
            'slug': 'placeholder',
            'date': 'Placeholder',
            'url': '/',
        })
    return posts
+
diff --git a/builder/paths.py b/builder/paths.py
new file mode 100644
index 00000000..356f2f3d
--- /dev/null
+++ b/builder/paths.py
@@ -0,0 +1,6 @@
+
# Shared path constants for the builder modules.
# S3 key prefixes for uploaded assets.
s3_site_path = "v1/site"
s3_datasets_path = "v1" # datasets is already in the filename
# Local output/input directories, relative to the builder/ working directory.
public_path = "../site/public"
content_path = "../site/content"
template_path = "../site/templates"
diff --git a/builder/s3.py b/builder/s3.py
new file mode 100644
index 00000000..41ecdf61
--- /dev/null
+++ b/builder/s3.py
@@ -0,0 +1,61 @@
+import os
+import glob
+import boto3
+from paths import *
+
# One shared boto3 S3 client, configured entirely from environment
# variables (loaded via dotenv in builder.py).
session = boto3.session.Session()

s3_client = session.client(
    service_name='s3',
    aws_access_key_id=os.getenv('S3_KEY'),
    aws_secret_access_key=os.getenv('S3_SECRET'),
    endpoint_url=os.getenv('S3_ENDPOINT'),
    region_name=os.getenv('S3_REGION'),
)
+
def sync_directory(base_fn, s3_path, metadata):
    """One-way sync of <base_fn>/assets/* to S3 under s3_path + metadata['url'].

    Uploads new files, re-uploads files modified since their S3 copy, and
    deletes S3 objects that have no local counterpart. No-op when the page
    sets `sync: false`.
    """
    # Check the flag before doing any local scanning (previously the assets
    # directory was globbed even when syncing was disabled).
    if not metadata['sync']:
        return

    # Local asset filenames still awaiting upload; names are removed as
    # their remote counterparts are found below.
    fns = {}
    for fn in glob.glob(os.path.join(base_fn, 'assets/*')):
        fns[os.path.basename(fn)] = True

    remote_path = s3_path + metadata['url']

    # NOTE(review): list_objects returns at most 1000 keys per call; a
    # paginator would be needed if an assets/ folder ever exceeds that.
    directory = s3_client.list_objects(Bucket=os.getenv('S3_BUCKET'), Prefix=remote_path)

    if 'Contents' in directory:
        for obj in directory['Contents']:
            s3_fn = obj['Key']
            fn = os.path.basename(s3_fn)
            local_fn = os.path.join(base_fn, 'assets', fn)
            if fn in fns:
                del fns[fn]
                # Re-upload when the local file is newer than the S3 object.
                if obj['LastModified'].timestamp() < os.path.getmtime(local_fn):
                    print("s3 update {}".format(s3_fn))
                    s3_client.upload_file(
                        local_fn,
                        os.getenv('S3_BUCKET'),
                        s3_fn,
                        ExtraArgs={ 'ACL': 'public-read' })
            else:
                # Remote object with no local counterpart: delete it.
                print("s3 delete {}".format(s3_fn))
                s3_client.delete_object(
                    Bucket=os.getenv('S3_BUCKET'),
                    Key=s3_fn,
                )

    # Whatever is left in fns never appeared remotely: upload it.
    for fn in fns:
        local_fn = os.path.join(base_fn, 'assets', fn)
        # Build the key with '/' explicitly: S3 keys must not use the
        # platform path separator (os.path.join would emit '\' on Windows).
        s3_fn = "{}assets/{}".format(remote_path, fn)
        print("s3 create {}".format(s3_fn))
        s3_client.upload_file(
            local_fn,
            os.getenv('S3_BUCKET'),
            s3_fn,
            ExtraArgs={ 'ACL': 'public-read' })
+
def make_s3_path(s3_dir, metadata_path):
    """Return the public asset URL prefix: <endpoint>/<bucket>/<dir><page path>."""
    endpoint = os.getenv('S3_ENDPOINT')
    bucket = os.getenv('S3_BUCKET')
    return "{endpoint}/{bucket}/{dir}{path}".format(
        endpoint=endpoint, bucket=bucket, dir=s3_dir, path=metadata_path)