path: root/megapixels/app/site/loader.py
import os
import re
import glob

import app.settings.app_cfg as cfg

from app.utils.file_utils import load_json
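
# Note (inferred from usage below, not documented in this file): cfg is
# assumed to define DIR_SITE_CONTENT, the root of the markdown content tree,
# and DIR_SITE_FINAL_CITATIONS, a directory of per-slug citation JSON files.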

def read_metadata(fn):
  """
  Read a markdown file and extract its metadata
  """
  with open(fn, "r") as file:
    data = file.read()
    # strip a single leading space after each newline
    data = data.replace("\n ", "\n")
    if "\n" in data:
      # LF or CRLF line endings: drop any stray carriage returns
      data = data.replace("\r", "")
    else:
      # CR-only line endings: convert carriage returns to newlines
      data = data.replace("\r", "\n")
    # blank lines delimit sections (metadata header, rules, body)
    sections = data.split("\n\n")
  return parse_metadata(fn, sections)
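
# For reference, a hypothetical example of the expected input layout (not a
# file from this repo): the first blank-line-separated section containing
# ': ' becomes the metadata header, '-----' rules are skipped, and every
# section after the header is returned as page content.
#
#   Title: Example Page
#   Desc: A short description
#   Slug: example-page
#
#   ---------------------------------
#
#   First body paragraph...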


default_metadata = {
  'status': 'published',
  'title': 'Untitled Page',
  'desc': '',
  'slug': '',
  'published': '2018-12-31',
  'updated': '2018-12-31',
  'authors': 'Adam Harvey',
  'sync': 'true',
  'tagline': '',
}

def parse_metadata(fn, sections):
  """
  Parse the metadata headers in a markdown file
  (everything before the second ---------)
  - determines the appropriate urls for this page
  - loads the paper citation data, if any
  """
  found_meta = False
  metadata = {}
  valid_sections = []
  for section in sections:
    if not found_meta and ': ' in section:
      # the first 'key: value' section is the metadata header
      found_meta = True
      parse_metadata_section(metadata, section)
      continue
    if '-----' in section:
      # skip horizontal rules
      continue
    if found_meta:
      # everything after the header is page content
      valid_sections.append(section)

  if 'title' not in metadata:
    print('warning: {} has no title'.format(fn))
  for key in default_metadata:
    if key not in metadata:
      metadata[key] = default_metadata[key]

  # derive the site path/url from the file's location under the content root
  basedir = os.path.dirname(fn.replace(cfg.DIR_SITE_CONTENT, ''))
  basename = os.path.basename(fn)
  if basedir == '/':
    # the site root index
    metadata['path'] = '/'
    metadata['url'] = '/'
  elif basename == 'index.md':
    # a directory index page maps to the directory url
    metadata['path'] = basedir + '/'
    metadata['url'] = metadata['path']
  else:
    # a standalone page gets its own trailing-slash url
    metadata['path'] = basedir + '/'
    metadata['url'] = metadata['path'] + basename.replace('.md', '') + '/'

  # an unedited template placeholder counts as published
  if metadata['status'] == 'published|draft|private':
    metadata['status'] = 'published'

  # strip HTML tags from the description for use as the og:description tag
  metadata['og_desc'] = re.sub('<[^<]+?>', '', metadata['desc'])

  # 'sync' arrives as a string; anything other than 'false' means true
  metadata['sync'] = metadata['sync'] != 'false'

  metadata['author_html'] = '<br>'.join(metadata['authors'].split(','))

  # attach the page's citation data if a matching JSON file exists
  dataset_path = os.path.join(cfg.DIR_SITE_FINAL_CITATIONS, metadata['slug'] + '.json')
  if os.path.exists(dataset_path):
    metadata['meta'] = load_json(dataset_path)
    if not metadata['meta']:
      print("Bad metadata? {}".format(dataset_path))

  return metadata, valid_sections
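
# Example of the url derivation above (hypothetical paths): with
# cfg.DIR_SITE_CONTENT = '/site/content',
#   /site/content/datasets/megaface/index.md -> path = url = '/datasets/megaface/'
#   /site/content/about/team.md              -> path = '/about/', url = '/about/team/'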

def parse_metadata_section(metadata, section):
  """
  Parse the 'key: value' lines of a metadata section
  """
  for line in section.split("\n"):
    if ': ' not in line:
      continue
    # split on the first ': ' only, so values may themselves contain colons
    key, value = line.split(': ', 1)
    metadata[key.lower()] = value
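
# Example usage (hypothetical values):
#   md = {}
#   parse_metadata_section(md, "Title: Hello\nAuthors: A, B")
#   # md is now {'title': 'Hello', 'authors': 'A, B'}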


def read_research_post_index():
  """
  Generate an index of the research (blog) posts
  """
  return read_post_index('research')


def read_datasets_index():
  """
  Generate an index of the datasets
  """
  return read_post_index('datasets')


def read_post_index(basedir):
  """
  Generate an index of posts
  """
  posts = []
  for fn in sorted(glob.glob(os.path.join(cfg.DIR_SITE_CONTENT, basedir, '*/index.md'))):
    metadata, _sections = read_metadata(fn)
    # skip anything not publicly published
    if metadata['status'] in ('private', 'draft'):
      continue
    posts.append(metadata)
  # fall back to a placeholder so templates always have an entry to render
  if not posts:
    posts.append({
      'title': 'Placeholder',
      'slug': 'placeholder',
      'date': 'Placeholder',
      'url': '/',
    })
  return posts
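

# Minimal smoke test, a sketch only: assumes the cfg paths are configured and
# the content tree exists on disk.
if __name__ == '__main__':
  for post in read_research_post_index():
    print(post.get('status', '?'), post['url'], post['title'])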