summaryrefslogtreecommitdiff
path: root/builder
diff options
context:
space:
mode:
authorJules Laplace <julescarbon@gmail.com>2018-12-04 21:12:59 +0100
committerJules Laplace <julescarbon@gmail.com>2018-12-04 21:12:59 +0100
commitd69086a1b2d7d6e6def55f35e30d0623701de011 (patch)
tree1f73899aa4bcb9ecf0600f0d95f5909c79818780 /builder
parent966e27c7418d6e188ea4b1f651a5e6c67495b765 (diff)
embedding images
Diffstat (limited to 'builder')
-rw-r--r--builder/README.md21
-rw-r--r--builder/__init__.py0
-rw-r--r--builder/builder.py69
-rw-r--r--builder/parser.py111
-rw-r--r--builder/paths.py6
-rw-r--r--builder/s3.py55
6 files changed, 262 insertions, 0 deletions
diff --git a/builder/README.md b/builder/README.md
new file mode 100644
index 00000000..1a6d3a1e
--- /dev/null
+++ b/builder/README.md
@@ -0,0 +1,21 @@
+Megapixels Static Site Generator
+================================
+
+The index, blog, about, and other pages are built using this static site generator.
+
+## Metadata
+
+```
+status: published|draft|private
+title: From 1 to 100 Pixels
+desc: High resolution insights from low resolution imagery
+slug: from-1-to-100-pixels
+published: 2018-12-04
+updated: 2018-12-04
+authors: Adam Harvey, Berit Gilma, Matthew Stender
+```
+
+## S3 Assets
+
+Static assets: `v1/site/about/assets/picture.jpg`
+Dataset assets: `v1/datasets/lfw/assets/picture.jpg`
diff --git a/builder/__init__.py b/builder/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/builder/__init__.py
diff --git a/builder/builder.py b/builder/builder.py
new file mode 100644
index 00000000..44fbd1c6
--- /dev/null
+++ b/builder/builder.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+
+from dotenv import load_dotenv
+load_dotenv()
+
+import os
+import glob
+from jinja2 import Environment, FileSystemLoader, select_autoescape
+
+import s3
+import parser
+from paths import *
+
+# Jinja environment loading templates from ../site/templates.
+# select_autoescape([]) enables autoescaping for no file extensions, i.e.
+# escaping is effectively off — page bodies are pre-rendered HTML.
+env = Environment(
+    loader=FileSystemLoader(template_path),
+    autoescape=select_autoescape([])
+)
+
def build_page(fn, research_posts):
    """Render a single markdown content file to HTML under the public tree.

    Reads front-matter and sections from `fn`, picks a template (pages under
    research/ use research.html, everything else page.html), rewrites
    relative image URLs to the page's S3 asset path, and writes
    <public_path><url>/index.html. For index.md pages the sibling assets/
    directory is synced to S3 first.
    """
    metadata, sections = parser.read_metadata(fn)

    if metadata is None:
        print("{} has no metadata".format(fn))
        return

    print(metadata['url'])

    dirname = os.path.dirname(fn)
    output_path = public_path + metadata['url']
    output_fn = os.path.join(output_path, "index.html")

    if 'research/' in fn:
        template = env.get_template("research.html")
    else:
        template = env.get_template("page.html")

    # Dataset pages live under the datasets S3 prefix; everything else under
    # the site prefix. Only index pages carry an assets/ directory to sync.
    # (Previously this branch duplicated the s3_path format string twice.)
    s3_root = s3_datasets_path if 'datasets' in fn else s3_site_path
    s3_path = "{}/{}/{}{}".format(os.getenv('S3_ENDPOINT'), os.getenv('S3_BUCKET'), s3_root, metadata['path'])
    if 'index.md' in fn:
        s3.sync_directory(dirname, s3_root, metadata)

    print(s3_path)

    content = parser.parse_markdown(sections, s3_path)

    html = template.render(
        metadata=metadata,
        content=content,
        research_posts=research_posts,
        # Guard against an empty research index so a fresh build doesn't
        # crash with IndexError on research_posts[-1].
        latest_research_post=research_posts[-1] if research_posts else None,
    )

    os.makedirs(output_path, exist_ok=True)
    with open(output_fn, "w") as file:
        file.write(html)

    print("______")
+
def build_site():
    """Build every markdown page found under the content tree."""
    research_posts = parser.read_research_post_index()
    pattern = os.path.join(content_path, "**/*.md")
    for source_fn in glob.iglob(pattern, recursive=True):
        build_page(source_fn, research_posts)

if __name__ == '__main__':
    build_site()
diff --git a/builder/parser.py b/builder/parser.py
new file mode 100644
index 00000000..ea273556
--- /dev/null
+++ b/builder/parser.py
@@ -0,0 +1,111 @@
+import os
+import glob
+import re
+
+import mistune
+
+from paths import *
+
+# Module-level mistune renderer with HTML escaping disabled so raw HTML
+# embedded in the markdown content passes through to the generated pages.
+renderer = mistune.Renderer(escape=False)
+markdown = mistune.Markdown(renderer=renderer)
+
def fix_images(lines, s3_path):
    """Prefix relative markdown image URLs with the page's S3 asset path.

    `lines` is a list of markdown sections; they are joined into a single
    block separated by blank lines. Only image targets (``![alt](target)``)
    that are not already absolute http(s) URLs are rewritten; ordinary links
    on the same line are left untouched (the previous string-replace rewrote
    every ``](`` on any line containing an image).
    """
    # Opening of an image target that does not already start with a scheme.
    image_target = re.compile(r'(!\[[^\]]*\]\()(?!https?://)')
    block = "\n\n".join(lines)
    fixed = []
    for line in block.split("\n"):
        fixed.append(image_target.sub(lambda m: m.group(1) + s3_path, line))
    return "\n".join(fixed)
+
def wide_section(lines, s3_path):
    """Render a list of markdown sections as a full-width section block.

    Bug fix: the parameter was previously named `line` while the body read
    `lines`, raising NameError whenever a ``![wide]`` image was rendered.
    """
    lines = fix_images(lines, s3_path)
    return "<section class='wide'>" + markdown(lines) + "</section>"
+
def normal_section(lines, s3_path):
    """Render accumulated markdown sections as a standard <section> block.

    Returns an empty string when there is nothing to render.
    """
    if not lines:
        return ""
    body = fix_images(lines, s3_path)
    return "<section>" + markdown(body) + "</section>"
+
def parse_markdown(sections, s3_path):
    """Assemble markdown sections into a string of HTML <section> blocks.

    Top-level ``# `` headings are dropped (the template renders the title
    itself). A section containing a wide image interrupts the flow: the
    pending group is flushed as a normal section and the wide section is
    emitted on its own; everything else accumulates into the current group.
    """
    html_parts = []
    pending = []
    for section in sections:
        if section.startswith('# '):
            continue
        if '![wide]' not in section:
            pending.append(section)
            continue
        html_parts.append(normal_section(pending, s3_path))
        html_parts.append(wide_section([section], s3_path))
        pending = []
    html_parts.append(normal_section(pending, s3_path))
    return "".join(html_parts)
+
def read_metadata(fn):
    """Read a markdown file and parse it into (metadata, sections).

    Strips a single space after each newline, normalizes line endings, and
    splits on blank lines before delegating to parse_metadata.
    """
    with open(fn, "r") as file:
        data = file.read()
    data = data.replace("\n ", "\n")
    if "\n" in data:
        # CRLF (or mixed) input: drop the carriage returns.
        data = data.replace("\r", "")
    else:
        # CR-only input: treat carriage returns as newlines.
        data = data.replace("\r", "\n")
    sections = data.split("\n\n")
    return parse_metadata(fn, sections)
+
+# Fallback front-matter values filled in by parse_metadata for any key a
+# page's metadata block omits.
+default_metadata = {
+    'status': 'published',
+    'title': 'Untitled Page',
+    'desc': '',
+    'slug': '',
+    'published': '2018-12-31',
+    'updated': '2018-12-31',
+    'authors': 'Adam Harvey',
+}
+
def parse_metadata_section(metadata, section):
    """Parse ``key: value`` lines from a metadata block into `metadata`.

    Keys are lower-cased; surrounding whitespace is trimmed from both key and
    value so stray spaces in hand-written front matter don't leak into URLs
    or rendered HTML. Lines without a ``: `` separator are ignored, and only
    the first ``: `` splits, so values may themselves contain colons.
    """
    for line in section.split("\n"):
        if ': ' not in line:
            continue
        key, value = line.split(': ', 1)
        metadata[key.strip().lower()] = value.strip()
+
def parse_metadata(fn, sections):
    """Split blank-line-separated sections into (metadata dict, body sections).

    The first section containing ``: `` is treated as the metadata block;
    every later section (except ``-----`` separators) is page content.
    Missing keys are filled from default_metadata; 'path' and 'url' are
    derived from the filename, and multiple authors are joined with <br>.
    """
    metadata = {}
    body_sections = []
    meta_seen = False

    for section in sections:
        if not meta_seen and ': ' in section:
            meta_seen = True
            parse_metadata_section(metadata, section)
        elif '-----' in section:
            continue
        elif meta_seen:
            body_sections.append(section)

    if 'title' not in metadata:
        print('warning: {} has no title'.format(fn))
    for key, value in default_metadata.items():
        metadata.setdefault(key, value)

    basename = os.path.basename(fn)
    metadata['path'] = os.path.dirname(fn.replace(content_path, '')) + '/'
    if basename == 'index.md':
        metadata['url'] = metadata['path']
    else:
        metadata['url'] = metadata['path'] + basename.replace('.md', '') + '/'

    # A page that kept the README's placeholder value counts as published.
    if metadata['status'] == 'published|draft|private':
        metadata['status'] = 'published'
    metadata['authors'] = '<br>'.join(metadata['authors'].split(','))
    return metadata, body_sections
+
def read_research_post_index():
    """Return metadata for all publicly visible research posts, sorted by path."""
    pattern = os.path.join(content_path, 'research/**/index.md')
    posts = []
    for fn in sorted(glob.glob(pattern, recursive=True)):
        metadata, _ = read_metadata(fn)
        if metadata is None:
            continue
        # Drafts and private posts stay out of the index.
        if metadata['status'] in ('private', 'draft'):
            continue
        posts.append(metadata)
    return posts
+
diff --git a/builder/paths.py b/builder/paths.py
new file mode 100644
index 00000000..356f2f3d
--- /dev/null
+++ b/builder/paths.py
@@ -0,0 +1,6 @@
+
+# Remote key prefixes within the S3 bucket.
+s3_site_path = "v1/site"
+s3_datasets_path = "v1" # datasets is already in the filename
+# Local paths, relative to the builder/ directory.
+public_path = "../site/public"
+content_path = "../site/content"
+template_path = "../site/templates"
diff --git a/builder/s3.py b/builder/s3.py
new file mode 100644
index 00000000..7d4d52a0
--- /dev/null
+++ b/builder/s3.py
@@ -0,0 +1,55 @@
+import os
+import glob
+import boto3
+from paths import *
+
+# S3 credentials/endpoint are read from the environment
+# (S3_KEY, S3_SECRET, S3_ENDPOINT, S3_REGION).
+session = boto3.session.Session()
+
+s3_client = session.client(
+    service_name='s3',
+    aws_access_key_id=os.getenv('S3_KEY'),
+    aws_secret_access_key=os.getenv('S3_SECRET'),
+    endpoint_url=os.getenv('S3_ENDPOINT'),
+    region_name=os.getenv('S3_REGION'),
+)
+
def sync_directory(base_fn, s3_path, metadata):
    """Mirror a page's local assets/ directory to S3.

    Uploads new files, re-uploads files whose local mtime is newer than the
    remote LastModified, and deletes remote objects with no local
    counterpart. All uploaded objects are made public-read.

    Bug fix: the update and delete branches called `client.upload_file` /
    `client.delete_object` on an undefined name `client`; the module-level
    client is `s3_client`.
    """
    bucket = os.getenv('S3_BUCKET')

    # Local asset filenames still awaiting upload; entries are removed as
    # their matching remote object is found.
    pending = {}
    for fn in glob.glob(os.path.join(base_fn, 'assets/*')):
        pending[os.path.basename(fn)] = True

    remote_path = s3_path + metadata['url']

    directory = s3_client.list_objects(Bucket=bucket, Prefix=remote_path)

    if 'Contents' in directory:
        for obj in directory['Contents']:
            s3_fn = obj['Key']
            fn = os.path.basename(s3_fn)
            local_fn = os.path.join(base_fn, 'assets', fn)
            if fn in pending:
                del pending[fn]
                if obj['LastModified'].timestamp() < os.path.getmtime(local_fn):
                    print("s3 update {}".format(s3_fn))
                    s3_client.upload_file(
                        local_fn,
                        bucket,
                        s3_fn,
                        ExtraArgs={ 'ACL': 'public-read' })
            else:
                print("s3 delete {}".format(s3_fn))
                s3_client.delete_object(
                    Bucket=bucket,
                    Key=s3_fn,
                )

    # Anything left in `pending` does not exist remotely yet.
    for fn in pending:
        local_fn = os.path.join(base_fn, 'assets', fn)
        s3_fn = os.path.join(remote_path, 'assets', fn)
        print("s3 create {}".format(s3_fn))
        s3_client.upload_file(
            local_fn,
            bucket,
            s3_fn,
            ExtraArgs={ 'ACL': 'public-read' })