import click

from app.settings import types
from app.settings import app_cfg as cfg
from app.models.dataset import Dataset
from app.utils import click_utils
from app.utils.logger_utils import Logger

log = Logger.getLogger()


@click.command()
@click.option('-i', '--input', 'opt_fp_in', required=True,
  help='Face image file to lookup')
@click.option('--data_store', 'opt_data_store',
  type=cfg.DataStoreVar,
  default=click_utils.get_default(types.DataStore.HDD),
  show_default=True,
  help=click_utils.show_help(types.DataStore))
@click.option('--dataset', 'opt_dataset',
  type=cfg.DatasetVar,
  required=True,
  show_default=True,
  help=click_utils.show_help(types.Dataset))
@click.option('--results', 'opt_results', default=5,
  help='Number of match results to display')
@click.option('--gpu', 'opt_gpu', default=0,
  help='GPU index (use -1 for CPU)')
@click.pass_context
def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
  """Display image info"""

  import sys
  from glob import glob
  from os.path import join
  from pathlib import Path
  import time

  import pandas as pd
  import cv2 as cv
  from tqdm import tqdm
  import imutils
  from PIL import Image, ImageOps

  from app.utils import file_utils, im_utils, display_utils, draw_utils
  from app.models.data_store import DataStore
  from app.processors import face_detector
  from app.processors import face_extractor

  log = Logger.getLogger()

  # init dataset and load the metadata needed for matching
  dataset = Dataset(opt_data_store, opt_dataset)
  dataset.load_metadata(types.Metadata.FILE_RECORD)
  dataset.load_metadata(types.Metadata.FACE_VECTOR)
  dataset.load_metadata(types.Metadata.FACE_ROI)
  dataset.load_metadata(types.Metadata.IDENTITY)

  # init face detection
  detector = face_detector.DetectorCVDNN()

  # init face extractor
  extractor = face_extractor.ExtractorVGG()

  # load query image
  im_query = cv.imread(opt_fp_in)

  # get detection as BBox object (largest face only)
  bboxes = detector.detect(im_query, largest=True)
  if not bboxes:
    log.error('No face detected. Exiting')
    return
  bbox_norm = bboxes[0]
  dim = im_query.shape[:2][::-1]
  bbox_dim = bbox_norm.to_dim(dim)  # convert back to real dimensions

  # extract the face vector for the query face
  vec_query = extractor.extract(im_query, bbox_norm)
  log.debug(f'len query: {len(vec_query)}')

  # find matches
  image_records = dataset.find_matches(vec_query, n_results=opt_results)

  # summary
  im_query = draw_utils.draw_bbox(im_query, bbox_norm, stroke_weight=4)
  ims_match = [im_query]
  opt_size = (256, 256)

  for image_record in image_records:
    image_record.summarize()
    log.info(f'{image_record.filepath}')
    im_match = cv.imread(image_record.filepath)
    dim_match = im_match.shape[:2][::-1]
    bbox_match = image_record.bbox
    score = image_record.score
    # color-code the match bbox by distance score (lower is a closer match)
    if score < .5:
      clr = (0, 255, 0)
    elif score < .6:
      clr = (0, 255, 125)
    elif score < .65:
      clr = (0, 125, 125)
    elif score < .7:
      clr = (0, 125, 255)
    else:
      clr = (0, 0, 255)
    im_match = draw_utils.draw_bbox(im_match, bbox_match, stroke_weight=4, color=clr)
    bbox_match_dim = bbox_match.to_dim(dim_match)
    im_pil = im_utils.ensure_pil(im_match)
    # ImageOps.fit expects centering as normalized (0-1) fractions, not pixels
    center = (bbox_match_dim.cx / dim_match[0], bbox_match_dim.cy / dim_match[1])
    im_pil = ImageOps.fit(im_pil, opt_size, centering=center)
    im_np = im_utils.ensure_np(im_pil)
    if image_record.identity is not None:
      log.debug(f'identity: {image_record.identity.name_display}')
    else:
      log.debug('no identity info')
    log.debug(f'score: {image_record.score}')
    ims_match.append(im_np)

  # make montages of most similar faces
  montages = imutils.build_montages(ims_match, (256, 256), (3, 2))

  # display each montage page and save it next to the query image
  for i, montage in enumerate(montages):
    cv.imshow(f'{opt_dataset.name.upper()}: page {i}', montage)
    fp_out = join(Path(opt_fp_in).parent, f'{Path(opt_fp_in).stem}_{i}.png')
    cv.imwrite(fp_out, montage)

  display_utils.handle_keyboard()
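

# Example invocation (a sketch, not a confirmed entrypoint): the option names
# below come from the @click.option decorators above, but the script/group name
# ("run.py face_lookup") and the dataset value are assumptions -- the valid
# dataset choices depend on the types.Dataset enum in app.settings.
#
#   python run.py face_lookup -i path/to/query.jpg --dataset <DATASET> --results 6 --gpu 0
#
# The command writes one montage image per page next to the query image,
# e.g. query_0.png, query_1.png, ...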