summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  megapixels/app/models/sql_factory.py | 26
-rw-r--r--  megapixels/app/server/api.py | 7
2 files changed, 18 insertions, 15 deletions
diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py
index 25c7e784..a71eabb0 100644
--- a/megapixels/app/models/sql_factory.py
+++ b/megapixels/app/models/sql_factory.py
@@ -62,9 +62,7 @@ def load_sql_dataset(path, replace=False, engine=None, base_model=None):
df = pd.read_csv(fn)
# fix columns that are named "index", a sql reserved word
df.reindex_axis(sorted(df.columns), axis=1)
- print(df.columns)
columns = [column.name for column in table.__table__.columns]
- print(columns)
df.columns = columns
df.to_sql(name=table.__tablename__, con=engine, if_exists='replace', index=False)
return dataset
@@ -97,15 +95,23 @@ class SqlDataset:
"""
Get an identity given an ID.
"""
- table = self.get_table('identity')
# id += 1
- identity = table.query.filter(table.record_id <= id).order_by(table.record_id.desc()).first().toJSON()
- return {
- 'uuid': self.select('uuids', id),
- 'identity': identity,
- 'roi': self.select('roi', id),
- 'pose': self.select('pose', id),
- }
+ print('fetching {}'.format(id))
+
+ file_record_table = self.get_table('file_record')
+ file_record = file_record_table.query.filter(file_record_table.id == id).first()
+
+ identity_table = self.get_table('identity')
+ identity = identity_table.query.filter(identity_table.id == file_record.identity_id).first()
+
+ if file_record and identity:
+ return {
+ 'file_record': file_record.toJSON(),
+ 'identity': identity.toJSON(),
+ 'face_roi': self.select('face_roi', id),
+ 'face_pose': self.select('face_pose', id),
+ }
+ return {}
def search_name(self, q):
"""
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index 743e06f4..af3db4d0 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -113,8 +113,7 @@ def upload(dataset_name):
dists.append(round(float(_d), 2))
ids.append(_i+1)
- file_records = [ dataset.get_file_record(int(_i)) for _i in ids ]
- identities = [ dataset.get_identity(rec.identity_id) for rec in file_records ]
+ identities = [ dataset.get_identity(int(_i)) for _i in ids ]
# print(distances)
# print(ids)
@@ -151,8 +150,6 @@ def name_lookup(dataset_name):
'timing': time.time() - start,
}
- print(terms)
-
if len(terms) == 0:
return jsonify({ 'query': query, 'results': [] })
@@ -193,7 +190,7 @@ def name_lookup(dataset_name):
sorted_names = sorted(lookup.items(), key=operator.itemgetter(1), reverse=True)[0:10]
top_names = [results_lookup[item[0]] for item in sorted_names]
results = dataset.get_file_records_for_identities(top_names)
-
+
print(results)
return jsonify({
'query': query,