| field | value | date |
|---|---|---|
| author | adamhrv <adam@ahprojects.com> | 2019-01-17 11:26:41 +0100 |
| committer | adamhrv <adam@ahprojects.com> | 2019-01-17 11:26:41 +0100 |
| commit | cb4d6d6f5be213edbc4f3b1e4452e5b7ce5e9378 (patch) | |
| tree | a6a66d408e68c9a1401cc729a72952ea8f200762 /megapixels/commands/cv/face_roi.py | |
| parent | a672dfdfdbac7cdac43e22c5d0bf29550770e2ad (diff) | |
updates for batch processing
Diffstat (limited to 'megapixels/commands/cv/face_roi.py')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | megapixels/commands/cv/face_roi.py | 61 |

1 file changed, 37 insertions(+), 24 deletions(-)
```diff
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index 950936cf..e83b0f61 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -105,23 +105,29 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
   # get list of files to process
-  fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
-  df_records = pd.read_csv(fp_in, dtype={'fn':str}).set_index('index')
+  fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+  df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
   if opt_slice:
-    df_records = df_records[opt_slice[0]:opt_slice[1]]
-  log.debug('processing {:,} files'.format(len(df_records)))
+    df_record = df_record[opt_slice[0]:opt_slice[1]]
+  log.debug('processing {:,} files'.format(len(df_record)))
 
   # filter out grayscale
   color_filter = color_filters[opt_color_filter]
 
   # set largest flag, to keep all or only largest
-  opt_largest = opt_largest == 'largest'
+  opt_largest = (opt_largest == 'largest')
 
   data = []
+  skipped_files = []
+  processed_files = []
 
-  for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+  for df_record in tqdm(df_record.itertuples(), total=len(df_record)):
     fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
-    im = cv.imread(fp_im)
-    im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+    try:
+      im = cv.imread(fp_im)
+      im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+    except Exception as e:
+      log.debug(f'could not read: {fp_im}')
+      return
 
     # filter out color or grayscale iamges
     if color_filter != color_filters['all']:
       try:
@@ -134,31 +140,38 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
     try:
-      bboxes = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+      bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
         zone=opt_zone, conf_thresh=opt_conf_thresh)
     except Exception as e:
       log.error('could not detect: {}'.format(fp_im))
       log.error('{}'.format(e))
       continue
 
-    for bbox in bboxes:
-      roi = {
-        'record_index': int(df_record.Index),
-        'x': bbox.x,
-        'y': bbox.y,
-        'w': bbox.w,
-        'h': bbox.h
-      }
-      data.append(roi)
-    if len(bboxes) == 0:
+    if len(bboxes_norm) == 0:
+      skipped_files.append(fp_im)
       log.warn(f'no faces in: {fp_im}')
-
+      log.warn(f'skipped: {len(skipped_files)}. found:{len(processed_files)} files')
+    else:
+      processed_files.append(fp_im)
+      for bbox in bboxes_norm:
+        roi = {
+          'record_index': int(df_record.Index),
+          'x': bbox.x,
+          'y': bbox.y,
+          'w': bbox.w,
+          'h': bbox.h
+        }
+        data.append(roi)
+
     # if display optined
-    if opt_display and len(bboxes):
+    if opt_display and len(bboxes_norm):
       # draw each box
-      for bbox in bboxes:
-        bbox_dim = bbox.to_dim(im_resized.shape[:2][::-1])
-        draw_utils.draw_bbox(im_resized, bbox_dim)
+      for bbox_norm in bboxes_norm:
+        dim = im_resized.shape[:2][::-1]
+        bbox_dim = bbox.to_dim(dim)
+        if dim[0] > 1000:
+          im_resized = im_utils.resize(im_resized, width=1000)
+        im_resized = draw_utils.draw_bbox(im_resized, bbox_norm)
 
       # display and wait
       cv.imshow('', im_resized)
```
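For context on the pattern this commit moves toward, below is a minimal, self-contained sketch of the batch loop: unreadable or face-free images are recorded and skipped rather than aborting the run, per-file ROI rows are collected into a list of dicts, and normalized boxes are converted to pixel coordinates only for display. The names `batch_face_rois` and `detect_faces_norm`, the media path layout, and the return shape are assumptions for illustration, not the repo's actual `data_store` / `detector` / `im_utils` API.

```python
# Hedged sketch of the batch-processing pattern in this commit (assumed names,
# not the project's API): skip bad files, track skipped/processed, collect ROIs.
from pathlib import Path

import cv2
import pandas as pd


def detect_faces_norm(image):
    """Placeholder detector returning normalized (x, y, w, h) boxes in [0, 1]."""
    return []  # assumption: the repo's detector.detect() would go here


def batch_face_rois(df_record, media_dir, display=False):
    rois, skipped, processed = [], [], []
    for rec in df_record.itertuples():
        # assumption: ext includes the leading dot, files live under media_dir/subdir/
        fp_im = str(Path(media_dir) / str(rec.subdir) / f'{rec.fn}{rec.ext}')
        im = cv2.imread(fp_im)
        if im is None:
            # unreadable file: record it and move on, don't abort the whole batch
            skipped.append(fp_im)
            continue
        bboxes_norm = detect_faces_norm(im)
        if not bboxes_norm:
            skipped.append(fp_im)
            continue
        processed.append(fp_im)
        h, w = im.shape[:2]
        for (bx, by, bw, bh) in bboxes_norm:
            # keep normalized coordinates in the output rows
            rois.append({'record_index': int(rec.Index),
                         'x': bx, 'y': by, 'w': bw, 'h': bh})
            if display:
                # normalized -> pixel coordinates only for drawing
                pt1 = (int(bx * w), int(by * h))
                pt2 = (int((bx + bw) * w), int((by + bh) * h))
                cv2.rectangle(im, pt1, pt2, (0, 255, 0), 2)
        if display:
            cv2.imshow('face_roi', im)
            cv2.waitKey(1)
    return pd.DataFrame(rois), skipped, processed
```

One difference worth noting against the committed code: the new `except` block around `cv.imread` uses `return`, which ends the entire run on the first unreadable file; the sketch uses `continue` to match the stated goal of batch processing.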
