summaryrefslogtreecommitdiff
path: root/megapixels/app/site/builder.py
blob: fac49c244422803df8e1a26de3fdb52aad666105 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#!/usr/bin/python

import os
import glob
from jinja2 import Environment, FileSystemLoader, select_autoescape

import app.settings.app_cfg as cfg

import app.site.s3 as s3
import app.site.parser as parser

# Shared Jinja2 environment for all page builds; templates are loaded
# from the configured template directory. Autoescaping is disabled for
# every template name (select_autoescape([]) matches nothing) —
# presumably because `content` is already-rendered HTML from markdown
# that must be emitted verbatim. NOTE(review): confirm no untrusted
# input reaches the templates, since nothing is escaped.
env = Environment(
  loader=FileSystemLoader(cfg.DIR_SITE_TEMPLATES),
  autoescape=select_autoescape([])
)

def build_page(fn, research_posts, datasets):
  """
  Build a single page from a markdown file into the appropriate template.

  - writes the rendered HTML to <DIR_SITE_PUBLIC><url>/index.html
  - syncs the page directory's assets with s3 (index pages only)
  - selects a special template for the home, research, and dataset
    index pages; everything else uses the generic page template

  :param fn: path to the markdown source file
  :param research_posts: list of research post entries (newest last)
  :param datasets: list of dataset entries passed to the template
  """
  metadata, sections = parser.read_metadata(fn)

  # pages without a metadata header cannot be routed; skip them
  if metadata is None:
    print("{} has no metadata".format(fn))
    return

  print(metadata['url'])

  dirname = os.path.dirname(fn)
  # metadata['url'] starts with '/', so plain concatenation is intended
  # here — os.path.join would discard DIR_SITE_PUBLIC before an
  # absolute second component
  output_path = cfg.DIR_SITE_PUBLIC + metadata['url']
  output_fn = os.path.join(output_path, "index.html")

  # research template renders its own heading, so the markdown h1 is
  # dropped for research pages
  skip_h1 = False

  if metadata['url'] == '/':
    template = env.get_template("home.html")
  elif 'research/' in fn:
    skip_h1 = True
    template = env.get_template("research.html")
  elif 'datasets/index' in fn:
    template = env.get_template("datasets.html")
  else:
    template = env.get_template("page.html")

  # dataset assets live under a separate s3 prefix
  if 'datasets/' in fn:
    s3_dir = cfg.S3_DATASETS_PATH
  else:
    s3_dir = cfg.S3_SITE_PATH

  s3_path = s3.make_s3_path(s3_dir, metadata['path'])

  # only index pages own a directory of assets worth syncing
  if 'index.md' in fn:
    s3.sync_directory(dirname, s3_dir, metadata)

  content = parser.parse_markdown(sections, s3_path, skip_h1=skip_h1)

  html = template.render(
    metadata=metadata,
    content=content,
    research_posts=research_posts,
    # guard against an empty post list instead of raising IndexError
    latest_research_post=research_posts[-1] if research_posts else None,
    datasets=datasets,
  )

  os.makedirs(output_path, exist_ok=True)
  # write UTF-8 explicitly so the output does not depend on the
  # platform's locale default encoding
  with open(output_fn, "w", encoding="utf-8") as file:
    file.write(html)

def build_index(key, research_posts, datasets):
  """
  Build the index page for a content section (the research/blog list).

  Renders <key>/index.md through the generic page template, appends the
  generated research-post listing, and writes the result to
  <DIR_SITE_PUBLIC>/<key>/index.html.

  :param key: content subdirectory name, e.g. 'research'
  :param research_posts: list of research post entries (newest last)
  :param datasets: list of dataset entries passed to the template
  """
  # resolve the source file from the configured content dir, the same
  # way build_site/build_file do, instead of hard-coding a relative
  # '../site/content/...' path that breaks when run from another cwd
  fn = os.path.join(cfg.DIR_SITE_CONTENT, key, 'index.md')
  metadata, sections = parser.read_metadata(fn)

  # match build_page: bail out on pages without a metadata header
  # rather than crashing on metadata['path'] below
  if metadata is None:
    print("{} has no metadata".format(fn))
    return

  template = env.get_template("page.html")
  s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, metadata['path'])
  content = parser.parse_markdown(sections, s3_path, skip_h1=False)
  content += parser.parse_research_index(research_posts)
  html = template.render(
    metadata=metadata,
    content=content,
    research_posts=research_posts,
    # guard against an empty post list instead of raising IndexError
    latest_research_post=research_posts[-1] if research_posts else None,
    datasets=datasets,
  )
  output_path = os.path.join(cfg.DIR_SITE_PUBLIC, key)
  # ensure the output directory exists even when build_page has not
  # already created it for this section
  os.makedirs(output_path, exist_ok=True)
  output_fn = os.path.join(output_path, "index.html")
  # write UTF-8 explicitly so the output does not depend on the
  # platform's locale default encoding
  with open(output_fn, "w", encoding="utf-8") as file:
    file.write(html)

def build_site():
  """
  Rebuild the whole site: every markdown page under the content
  directory, then the research index. =^)
  """
  research_posts = parser.read_research_post_index()
  datasets = parser.read_datasets_index()
  pattern = os.path.join(cfg.DIR_SITE_CONTENT, "**/*.md")
  for markdown_fn in glob.iglob(pattern, recursive=True):
    build_page(markdown_fn, research_posts, datasets)
  build_index('research', research_posts, datasets)

def build_file(fn):
  """
  Build one page from a filename relative to the content dir. =^)
  """
  research_posts = parser.read_research_post_index()
  datasets = parser.read_datasets_index()
  full_path = os.path.join(cfg.DIR_SITE_CONTENT, fn)
  build_page(full_path, research_posts, datasets)