Diffstat (limited to 'megapixels')
-rw-r--r--  megapixels/app/models/sql_factory.py   3
-rw-r--r--  megapixels/app/server/api.py            8
-rw-r--r--  megapixels/app/site/builder.py         20
-rw-r--r--  megapixels/app/site/parser.py          16
4 files changed, 35 insertions(+), 12 deletions(-)
diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py
index b270afd2..a580f28e 100644
--- a/megapixels/app/models/sql_factory.py
+++ b/megapixels/app/models/sql_factory.py
@@ -61,7 +61,8 @@ def load_sql_dataset(path, replace=False, engine=None, base_model=None):
print('loading dataset {}'.format(fn))
df = pd.read_csv(fn)
# fix columns that are named "index", a sql reserved word
- df.columns = table.__table__.columns.keys()
+ df = df.reindex_axis(sorted(df.columns), axis=1)
+ df.columns = sorted(table.__table__.columns.keys())
df.to_sql(name=table.__tablename__, con=engine, if_exists='replace', index=False)
return dataset
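
Note on the hunk above: `reindex_axis` returns a new DataFrame rather than modifying in place, so its result must be assigned, and `.keys()` belongs inside `sorted()` (a plain list has no `.keys()`, and SQLAlchemy Column objects are not orderable). A minimal standalone sketch of the column-alignment idea, using pandas' current `reindex` in place of the older `reindex_axis`; the `Photo` model and sample data are hypothetical stand-ins, not project code:

    # Sketch: align CSV columns to a SQLAlchemy model's column names
    # before writing to SQL. `Photo` and the sample data are hypothetical.
    import pandas as pd
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class Photo(Base):
        __tablename__ = 'photos'
        id = Column(Integer, primary_key=True)  # the CSV calls this "index"
        url = Column(String)

    engine = create_engine('sqlite://')
    df = pd.DataFrame({'index': [1, 2], 'url': ['a.jpg', 'b.jpg']})
    df = df.reindex(sorted(df.columns), axis=1)           # sort the data columns
    df.columns = sorted(Photo.__table__.columns.keys())   # rename positionally
    df.to_sql(name=Photo.__tablename__, con=engine,
              if_exists='replace', index=False)

The positional rename only holds if the sorted CSV headers map one-to-one onto the sorted model columns, which is what this loader relies on.
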
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index 35862837..3683d5fd 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -45,18 +45,20 @@ def upload(dataset_name):
dataset = get_dataset(dataset_name)
if dataset_name not in faiss_datasets:
return jsonify({
- 'error': 'invalid dataset'
+ 'error': 'bad_dataset'
})
faiss_dataset = faiss_datasets[dataset_name]
file = request.files['query_img']
fn = file.filename
- if fn.endswith('blob'):
+ if fn.endswith('blob'): # FIX PNG IMAGES?
fn = 'filename.jpg'
basename, ext = os.path.splitext(fn)
# print("got {}, type {}".format(basename, ext))
if ext.lower() not in valid_exts:
- return jsonify({ 'error': 'not an image' })
+ return jsonify({
+ 'error': 'not_an_image'
+ })
im = Image.open(file.stream).convert('RGB')
im_np = pil2np(im)
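
For reference, the flow this hunk edits follows a common Flask upload-validation pattern. A self-contained sketch under assumed names (the route, `valid_exts`, and response fields are illustrative, not the project's exact code):

    # Sketch of the upload validation pattern above; names are assumptions.
    import os
    from flask import Flask, request, jsonify
    from PIL import Image

    app = Flask(__name__)
    valid_exts = {'.jpg', '.jpeg', '.png'}

    @app.route('/upload', methods=['POST'])
    def upload():
        file = request.files['query_img']
        fn = file.filename
        if fn.endswith('blob'):  # camera uploads can arrive named "blob"
            fn = 'filename.jpg'
        basename, ext = os.path.splitext(fn)
        if ext.lower() not in valid_exts:
            return jsonify({'error': 'not_an_image'})
        im = Image.open(file.stream).convert('RGB')
        return jsonify({'width': im.width, 'height': im.height})
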
diff --git a/megapixels/app/site/builder.py b/megapixels/app/site/builder.py
index ff1a0c83..fac49c24 100644
--- a/megapixels/app/site/builder.py
+++ b/megapixels/app/site/builder.py
@@ -14,7 +14,7 @@ env = Environment(
autoescape=select_autoescape([])
)
-def build_page(fn, research_posts):
+def build_page(fn, research_posts, datasets):
"""
build a single page from markdown into the appropriate template
- writes it to site/public/
@@ -40,6 +40,8 @@ def build_page(fn, research_posts):
elif 'research/' in fn:
skip_h1 = True
template = env.get_template("research.html")
+ elif 'datasets/index' in fn:
+ template = env.get_template("datasets.html")
else:
template = env.get_template("page.html")
@@ -60,17 +62,18 @@ def build_page(fn, research_posts):
content=content,
research_posts=research_posts,
latest_research_post=research_posts[-1],
+ datasets=datasets,
)
os.makedirs(output_path, exist_ok=True)
with open(output_fn, "w") as file:
file.write(html)
-def build_research_index(research_posts):
+def build_index(key, research_posts, datasets):
"""
build the index of research (blog) posts
"""
- metadata, sections = parser.read_metadata('../site/content/research/index.md')
+ metadata, sections = parser.read_metadata('../site/content/{}/index.md'.format(key))
template = env.get_template("page.html")
s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, metadata['path'])
content = parser.parse_markdown(sections, s3_path, skip_h1=False)
@@ -80,8 +83,9 @@ def build_research_index(research_posts):
content=content,
research_posts=research_posts,
latest_research_post=research_posts[-1],
+ datasets=datasets,
)
- output_fn = cfg.DIR_SITE_PUBLIC + '/research/index.html'
+ output_fn = '{}/{}/index.html'.format(cfg.DIR_SITE_PUBLIC, key)
with open(output_fn, "w") as file:
file.write(html)
@@ -90,14 +94,16 @@ def build_site():
build the site! =^)
"""
research_posts = parser.read_research_post_index()
+ datasets = parser.read_datasets_index()
for fn in glob.iglob(os.path.join(cfg.DIR_SITE_CONTENT, "**/*.md"), recursive=True):
- build_page(fn, research_posts)
- build_research_index(research_posts)
+ build_page(fn, research_posts, datasets)
+ build_index('research', research_posts, datasets)
def build_file(fn):
"""
build just one page from a filename! =^)
"""
research_posts = parser.read_research_post_index()
+ datasets = parser.read_datasets_index()
fn = os.path.join(cfg.DIR_SITE_CONTENT, fn)
- build_page(fn, research_posts)
+ build_page(fn, research_posts, datasets)
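
The datasets.html template selected above is not part of this diff; a minimal sketch of how such a template might consume the new `datasets` variable (the `title` and `path` fields are assumptions):

    # Sketch: rendering a hypothetical datasets index template.
    from jinja2 import Environment, DictLoader

    env = Environment(loader=DictLoader({
        'datasets.html': (
            '<ul>{% for d in datasets %}'
            '<li><a href="{{ d.path }}">{{ d.title }}</a></li>'
            '{% endfor %}</ul>'
        ),
    }))

    datasets = [{'title': 'Example Dataset', 'path': '/datasets/example/'}]
    print(env.get_template('datasets.html').render(datasets=datasets))

Note that `build_site()` still only calls `build_index('research', ...)`; the datasets landing page itself is produced by the new `'datasets/index'` branch in `build_page`.
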
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index b3d3a8c2..d3eccfca 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -66,6 +66,8 @@ def format_applet(section, s3_path):
opt = None
if command == 'python' or command == 'javascript' or command == 'code':
return format_section([ section ], s3_path)
+ if command == '':
+ return ''
applet['command'] = command
if opt:
@@ -221,8 +223,20 @@ def read_research_post_index():
"""
Generate an index of the research (blog) posts
"""
+ return read_post_index('research')
+
+def read_datasets_index():
+ """
+ Generate an index of the datasets
+ """
+ return read_post_index('datasets')
+
+def read_post_index(basedir):
+ """
+ Generate an index of posts
+ """
posts = []
- for fn in sorted(glob.glob('../site/content/research/*/index.md')):
+ for fn in sorted(glob.glob('../site/content/{}/*/index.md'.format(basedir))):
metadata, valid_sections = read_metadata(fn)
if metadata is None or metadata['status'] == 'private' or metadata['status'] == 'draft':
continue
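
The refactor above folds both index readers into one parameterized helper. A standalone sketch of the pattern, with `read_metadata` stubbed out (the real one parses the file's front matter):

    # Sketch of the generalized post-index helper; the stub metadata
    # reader stands in for the project's parser.
    import glob

    def read_metadata(fn):
        # stub: the real function reads front matter from the markdown file
        return {'status': 'published', 'path': fn}, []

    def read_post_index(basedir):
        posts = []
        for fn in sorted(glob.glob('../site/content/{}/*/index.md'.format(basedir))):
            metadata, valid_sections = read_metadata(fn)
            if metadata is None or metadata['status'] in ('private', 'draft'):
                continue
            posts.append(metadata)
        return posts

    research_posts = read_post_index('research')
    datasets = read_post_index('datasets')
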