| author | Jules Laplace <julescarbon@gmail.com> | 2019-01-12 18:37:43 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2019-01-12 18:37:43 +0100 |
| commit | 5fd0aab76caef8aaf7be77843b9c9260f22dfbb7 (patch) | |
| tree | 2d11f9f86fb181f40fba40ef491e176bbac4bf14 /megapixels/app | |
| parent | c3eec5ef62c6aacf4ca8c8056e1f9150dcd31506 (diff) | |
add prefix to name search
Diffstat (limited to 'megapixels/app')
| -rw-r--r-- | megapixels/app/models/sql_factory.py | 21 |
| -rw-r--r-- | megapixels/app/server/api.py | 68 |
| -rw-r--r-- | megapixels/app/server/create.py | 15 |
3 files changed, 76 insertions, 28 deletions
```diff
diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py
index eb91fb37..25c7e784 100644
--- a/megapixels/app/models/sql_factory.py
+++ b/megapixels/app/models/sql_factory.py
@@ -3,7 +3,7 @@
 import glob
 import time
 import pandas as pd
-from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float
+from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float, func
 from sqlalchemy.orm import sessionmaker
 from sqlalchemy.ext.declarative import declarative_base
@@ -112,7 +112,15 @@ class SqlDataset:
         Find an identity by name.
         """
         table = self.get_table('identity')
-        identity_list = table.query.filter(table.fullname.like(q)).order_by(table.fullname.desc()).limit(10)
+        identity_list = table.query.filter(table.fullname.ilike(q)).order_by(table.fullname.desc()).limit(15)
+        return identity_list
+
+    def search_description(self, q):
+        """
+        Find an identity by description.
+        """
+        table = self.get_table('identity')
+        identity_list = table.query.filter(table.description.ilike(q)).order_by(table.description.desc()).limit(15)
         return identity_list
 
     def get_file_records_for_identities(self, identity_list):
@@ -123,10 +131,11 @@ class SqlDataset:
         file_record_table = self.get_table('file_record')
         for row in identity_list:
             file_record = file_record_table.query.filter(file_record_table.identity_id == row.id).first()
-            identities.append({
-                'file_record': file_record.toJSON(),
-                'identity': row.toJSON(),
-            })
+            if file_record:
+                identities.append({
+                    'file_record': file_record.toJSON(),
+                    'identity': row.toJSON(),
+                })
         return identities
 
     def select(self, table, id):
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index 5f33e84b..743e06f4 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -1,3 +1,5 @@
+import logging
+import logging.handlers
 import os
 import re
 import time
@@ -141,36 +143,58 @@ def name_lookup(dataset_name):
     dataset = get_dataset(dataset_name)
     q = request.args.get('q')
-    q = re.sub('[^a-zA-Z ]+', '*', q)
+    q = re.sub('[^a-zA-Z. ]+', '*', q)
     terms = q.split(' ')
-    # print(q)
     query = {
         'q': q,
         'timing': time.time() - start,
     }
-
+
+    print(terms)
+
     if len(terms) == 0:
-        results = []
-    elif len(terms) == 1:
-        names = dataset.search_name('%' + terms[0] + '%') if terms[0] else []
-        results = dataset.get_file_records_for_identities(names)
-    else:
-        lookup = {}
-        results_lookup = {}
-        for i, term in enumerate(terms[0:5]):
-            search_term = '%' + term + '%'
-            names = dataset.search_name(term) if term else []
-            for name in names:
-                if name.id in lookup:
-                    lookup[name.id] += 1
-                else:
-                    lookup[name.id] = 1
-                    results_lookup[name.id] = name
-        top_names = [results_lookup[item[0]] for item in sorted(lookup.items(), key=operator.itemgetter(1))][0:10]
-        results = dataset.get_file_records_for_identities(top_names)
+        return jsonify({ 'query': query, 'results': [] })
 
-    # print(results)
+    lookup = {}
+    results_lookup = {}
+
+    names = dataset.search_name(q + '%')
+    for name in names:
+        if name.id in lookup:
+            print(name.fullname)
+            lookup[name.id] += 4
+        else:
+            print(name.fullname)
+            lookup[name.id] = 4
+            results_lookup[name.id] = name
+
+    for i, term in enumerate(terms[0:5]):
+        search_term = '%' + term + '%'
+        names = dataset.search_name(search_term) if len(term) > 0 else []
+        descriptions = dataset.search_description(search_term) if len(term) > 0 else []
+        for name in names:
+            if name.id in lookup:
+                print(name.fullname)
+                lookup[name.id] += 2
+            else:
+                print(name.fullname)
+                lookup[name.id] = 2
+                results_lookup[name.id] = name
+        for name in descriptions:
+            if name.id in lookup:
+                print(name.fullname)
+                lookup[name.id] += 1
+            else:
+                print(name.fullname)
+                lookup[name.id] = 1
+                results_lookup[name.id] = name
+
+    sorted_names = sorted(lookup.items(), key=operator.itemgetter(1), reverse=True)[0:10]
+    top_names = [results_lookup[item[0]] for item in sorted_names]
+    results = dataset.get_file_records_for_identities(top_names)
+
+    print(results)
     return jsonify({
         'query': query,
         'results': results,
diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py
index f46bb2a0..a1ce56df 100644
--- a/megapixels/app/server/create.py
+++ b/megapixels/app/server/create.py
@@ -1,3 +1,18 @@
+import logging
+import logging.handlers
+
+logger = logging.getLogger("")
+logger.setLevel(logging.DEBUG)
+handler = logging.handlers.RotatingFileHandler("flask.log",
+                                               maxBytes=3000000, backupCount=2)
+formatter = logging.Formatter(
+    '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logging.getLogger().addHandler(logging.StreamHandler())
+
+logging.debug("starting app")
+
 from flask import Flask, Blueprint, jsonify, send_from_directory
 from flask_sqlalchemy import SQLAlchemy
 from app.models.sql_factory import connection_url, load_sql_datasets
```
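To make the ranking easier to follow, here is a minimal, self-contained sketch of the scoring scheme the patched `name_lookup` uses: a case-insensitive prefix match on the whole query (done with `fullname.ilike(q + '%')` in the patch) scores 4, a per-term match anywhere in the name scores 2, a per-term match in the description scores 1, and the ten highest-scoring identities are returned. The `Identity` records and `match_*` helpers below are illustrative stand-ins, not the app's SQLAlchemy models or `SqlDataset` API.

```python
import operator
from collections import namedtuple

Identity = namedtuple('Identity', 'id fullname description')

# Toy data standing in for rows of the 'identity' table.
PEOPLE = [
    Identity(1, 'Ada Lovelace', 'mathematician and writer'),
    Identity(2, 'Adam Smith', 'economist and philosopher'),
    Identity(3, 'Grace Hopper', 'computer scientist and rear admiral'),
]

def match_name_prefix(q):
    # Stand-in for search_name(q + '%'): case-insensitive prefix match on the name.
    return [p for p in PEOPLE if p.fullname.lower().startswith(q.lower())]

def match_name(term):
    # Stand-in for search_name('%' + term + '%'): term anywhere in the name.
    return [p for p in PEOPLE if term.lower() in p.fullname.lower()]

def match_description(term):
    # Stand-in for search_description('%' + term + '%'): term anywhere in the description.
    return [p for p in PEOPLE if term.lower() in p.description.lower()]

def rank(q, limit=10):
    """Score candidates the way the patched name_lookup does and return the best ones."""
    scores, by_id = {}, {}

    def credit(hits, weight):
        for person in hits:
            scores[person.id] = scores.get(person.id, 0) + weight
            by_id[person.id] = person

    credit(match_name_prefix(q), 4)          # whole query as a name prefix: strongest signal
    for term in q.split(' ')[:5]:            # at most five terms, as in the patch
        if not term:
            continue
        credit(match_name(term), 2)          # term appears in the name
        credit(match_description(term), 1)   # term appears in the description

    top = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)[:limit]
    return [by_id[identity_id] for identity_id, _ in top]

print([p.fullname for p in rank('ada lovelace')])  # ['Ada Lovelace', 'Adam Smith']
```

Weighting the whole-query prefix above the per-term hits is what lets an exact prefix such as "Ada L" float to the top even when its individual terms also match many other names or descriptions.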
