author     Jules Laplace <julescarbon@gmail.com>  2019-01-12 18:37:43 +0100
committer  Jules Laplace <julescarbon@gmail.com>  2019-01-12 18:37:43 +0100
commit     5fd0aab76caef8aaf7be77843b9c9260f22dfbb7 (patch)
tree       2d11f9f86fb181f40fba40ef491e176bbac4bf14 /megapixels/app/server
parent     c3eec5ef62c6aacf4ca8c8056e1f9150dcd31506 (diff)
add prefix to name search
Diffstat (limited to 'megapixels/app/server')
-rw-r--r--  megapixels/app/server/api.py     68
-rw-r--r--  megapixels/app/server/create.py  15
2 files changed, 61 insertions(+), 22 deletions(-)
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index 5f33e84b..743e06f4 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -1,3 +1,5 @@
+import logging
+import logging.handlers
 import os
 import re
 import time
@@ -141,36 +143,58 @@ def name_lookup(dataset_name):
     dataset = get_dataset(dataset_name)
     q = request.args.get('q')
-    q = re.sub('[^a-zA-Z ]+', '*', q)
+    q = re.sub('[^a-zA-Z. ]+', '*', q)
     terms = q.split(' ')
-    # print(q)
     query = {
         'q': q,
         'timing': time.time() - start,
     }
-
+
+    print(terms)
+
     if len(terms) == 0:
-        results = []
-    elif len(terms) == 1:
-        names = dataset.search_name('%' + terms[0] + '%') if terms[0] else []
-        results = dataset.get_file_records_for_identities(names)
-    else:
-        lookup = {}
-        results_lookup = {}
-        for i, term in enumerate(terms[0:5]):
-            search_term = '%' + term + '%'
-            names = dataset.search_name(term) if term else []
-            for name in names:
-                if name.id in lookup:
-                    lookup[name.id] += 1
-                else:
-                    lookup[name.id] = 1
-                    results_lookup[name.id] = name
-        top_names = [results_lookup[item[0]] for item in sorted(lookup.items(), key=operator.itemgetter(1))][0:10]
-        results = dataset.get_file_records_for_identities(top_names)
+        return jsonify({ 'query': query, 'results': [] })
-    # print(results)
+    lookup = {}
+    results_lookup = {}
+
+    names = dataset.search_name(q + '%')
+    for name in names:
+        if name.id in lookup:
+            print(name.fullname)
+            lookup[name.id] += 4
+        else:
+            print(name.fullname)
+            lookup[name.id] = 4
+            results_lookup[name.id] = name
+
+    for i, term in enumerate(terms[0:5]):
+        search_term = '%' + term + '%'
+        names = dataset.search_name(search_term) if len(term) > 0 else []
+        descriptions = dataset.search_description(search_term) if len(term) > 0 else []
+        for name in names:
+            if name.id in lookup:
+                print(name.fullname)
+                lookup[name.id] += 2
+            else:
+                print(name.fullname)
+                lookup[name.id] = 2
+                results_lookup[name.id] = name
+        for name in descriptions:
+            if name.id in lookup:
+                print(name.fullname)
+                lookup[name.id] += 1
+            else:
+                print(name.fullname)
+                lookup[name.id] = 1
+                results_lookup[name.id] = name
+
+    sorted_names = sorted(lookup.items(), key=operator.itemgetter(1), reverse=True)[0:10]
+    top_names = [results_lookup[item[0]] for item in sorted_names]
+    results = dataset.get_file_records_for_identities(top_names)
+
+    print(results)
     return jsonify({
         'query': query,
         'results': results,
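
The rewritten lookup amounts to a weighted vote over three match types: a prefix match of the whole query against a name scores 4, a per-term substring match against a name scores 2, and a per-term substring match against a description scores 1; the ten highest-scoring identities are returned. A minimal standalone sketch of that scoring, assuming a dataset object exposing the search_name/search_description methods used above (the rank_identities helper name is illustrative, not part of the codebase):

    import operator

    def rank_identities(q, dataset, limit=10):
        # Weighted vote, mirroring the diff above: whole-query prefix
        # match = 4, per-term name match = 2, per-term description match = 1.
        scores = {}
        by_id = {}

        def vote(rows, weight):
            for row in rows:
                scores[row.id] = scores.get(row.id, 0) + weight
                by_id[row.id] = row

        vote(dataset.search_name(q + '%'), 4)  # names starting with the query
        for term in q.split(' ')[0:5]:
            if len(term) == 0:
                continue
            like = '%' + term + '%'
            vote(dataset.search_name(like), 2)         # name contains the term
            vote(dataset.search_description(like), 1)  # description contains it

        ranked = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
        return [by_id[identity_id] for identity_id, _ in ranked[0:limit]]

With these weights, one whole-query prefix hit outranks a pair of description hits on the same identity, which is what makes typing the start of a full name surface that record first.
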
diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py
index f46bb2a0..a1ce56df 100644
--- a/megapixels/app/server/create.py
+++ b/megapixels/app/server/create.py
@@ -1,3 +1,18 @@
+import logging
+import logging.handlers
+
+logger = logging.getLogger("")
+logger.setLevel(logging.DEBUG)
+handler = logging.handlers.RotatingFileHandler("flask.log",
+    maxBytes=3000000, backupCount=2)
+formatter = logging.Formatter(
+    '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logging.getLogger().addHandler(logging.StreamHandler())
+
+logging.debug("starting app")
+
 from flask import Flask, Blueprint, jsonify, send_from_directory
 from flask_sqlalchemy import SQLAlchemy
 from app.models.sql_factory import connection_url, load_sql_datasets
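
Because create.py configures the root logger at import time, every module in the app inherits both handlers: records are written to flask.log (rotated at ~3 MB, two backups kept) and echoed to the console, unformatted, since the StreamHandler is added without a formatter. A minimal usage sketch from any other module (module and function names are illustrative):

    import logging

    log = logging.getLogger(__name__)  # child logger; propagates to the root

    def handle_request():
        # Lands in flask.log with the [timestamp] {file:lineno} LEVEL format
        # configured above, and is also printed to stderr.
        log.debug("handling request")
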