Diffstat (limited to 'megapixels/app/server/api.py')
-rw-r--r--  megapixels/app/server/api.py  57
1 file changed, 49 insertions(+), 8 deletions(-)
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index 3683d5fd..b3bce9bc 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -3,6 +3,7 @@ import re
import time
import dlib
import numpy as np
+import operator
from flask import Blueprint, request, jsonify
from PIL import Image # todo: try to remove PIL dependency
@@ -27,7 +28,6 @@ def index():
"""List the datasets and their fields"""
return jsonify({ 'datasets': list_datasets() })
-
@api.route('/dataset/<dataset_name>')
def show(dataset_name):
"""Show the data that a dataset will return"""
@@ -37,7 +37,6 @@ def show(dataset_name):
else:
return jsonify({ 'status': 404 })
-
@api.route('/dataset/<dataset_name>/face', methods=['POST'])
def upload(dataset_name):
"""Query an image against FAISS and return the matching identities"""
@@ -108,9 +107,10 @@ def upload(dataset_name):
for _d, _i in zip(distances, indexes):
if _d <= THRESHOLD:
dists.append(round(float(_d), 2))
- ids.append(_i+1)
+ ids.append(_i)
- results = [ dataset.get_identity(int(_i)) for _i in ids ]
+ identities = [ dataset.get_identity(int(_i)) for _i in ids ]
+ identities = list(filter(None, identities))
# print(distances)
# print(ids)
@@ -127,7 +127,7 @@ def upload(dataset_name):
# print(results)
return jsonify({
'query': query,
- 'results': results,
+ 'results': identities,
'distances': dists,
})
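
The two hunks above change how upload() turns FAISS results into identities: the off-by-one index adjustment is dropped, and identities that fail to resolve are filtered out before being returned. A minimal sketch of that pattern, where THRESHOLD and get_identity are stand-ins for the app's own values rather than the actual implementation:

THRESHOLD = 0.6  # assumed cutoff; the real value lives elsewhere in api.py

def resolve_matches(distances, indexes, get_identity):
    """Keep matches under the distance threshold, then drop unresolved identities."""
    dists, ids = [], []
    for d, i in zip(distances, indexes):
        if d <= THRESHOLD:
            dists.append(round(float(d), 2))
            ids.append(int(i))  # raw FAISS index, no +1 offset
    identities = [get_identity(i) for i in ids]
    identities = list(filter(None, identities))  # drop ids with no identity record
    return dists, identities
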
@@ -139,15 +139,56 @@ def name_lookup(dataset_name):
dataset = get_dataset(dataset_name)
q = request.args.get('q')
- # print(q)
+ q = re.sub('[^a-zA-Z. ]+', '*', q)
+ terms = q.split(' ')
query = {
'q': q,
'timing': time.time() - start,
}
- results = dataset.search_name(q + '%') if q else None
+
+ if len(terms) == 0:
+ return jsonify({ 'query': query, 'results': [] })
+
+ lookup = {}
+ results_lookup = {}
+
+ names = dataset.search_name(q + '%')
+ for name in names:
+ if name.id in lookup:
+ print(name.fullname)
+ lookup[name.id] += 4
+ else:
+ print(name.fullname)
+ lookup[name.id] = 4
+ results_lookup[name.id] = name
- # print(results)
+ for i, term in enumerate(terms[0:5]):
+ search_term = '%' + term + '%'
+ names = dataset.search_name(search_term) if len(term) > 0 else []
+ descriptions = dataset.search_description(search_term) if len(term) > 0 else []
+ for name in names:
+ if name.id in lookup:
+ print(name.fullname)
+ lookup[name.id] += 2
+ else:
+ print(name.fullname)
+ lookup[name.id] = 2
+ results_lookup[name.id] = name
+ for name in descriptions:
+ if name.id in lookup:
+ print(name.fullname)
+ lookup[name.id] += 1
+ else:
+ print(name.fullname)
+ lookup[name.id] = 1
+ results_lookup[name.id] = name
+
+ sorted_names = sorted(lookup.items(), key=operator.itemgetter(1), reverse=True)[0:10]
+ top_names = [results_lookup[item[0]] for item in sorted_names]
+ results = dataset.get_file_records_for_identities(top_names)
+
+ print(results)
return jsonify({
'query': query,
'results': results,
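
The final hunk replaces the single prefix search in name_lookup() with a weighted scoring pass: a hit on the full query as a prefix scores 4, a per-term name hit scores 2, a per-term description hit scores 1, and the ten highest-scoring identities are kept. A condensed sketch of that ranking step, assuming hypothetical search_name / search_description callables that return records with an .id attribute:

import operator

def rank_names(q, search_name, search_description, limit=10):
    """Score records by where the query terms hit, then return the top matches."""
    scores, records = {}, {}

    def bump(record, weight):
        scores[record.id] = scores.get(record.id, 0) + weight
        records[record.id] = record

    # Whole query as a prefix match carries the most weight.
    for rec in search_name(q + '%'):
        bump(rec, 4)

    # Each individual term (first five only): name hits outrank description hits.
    for term in [t for t in q.split(' ') if t][:5]:
        pattern = '%' + term + '%'
        for rec in search_name(pattern):
            bump(rec, 2)
        for rec in search_description(pattern):
            bump(rec, 1)

    top = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)[:limit]
    return [records[rec_id] for rec_id, _ in top]

The sorted(..., key=operator.itemgetter(1), reverse=True) call is why the commit adds import operator at the top of the file.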