author    Adam Harvey <adam@ahprojects.com>    2018-12-23 01:37:03 +0100
committer Adam Harvey <adam@ahprojects.com>    2018-12-23 01:37:03 +0100
commit    4452e02e8b04f3476273574a875bb60cfbb4568b (patch)
tree      3ffa44f9621b736250a8b94da14a187dc785c2fe /megapixels/app/models
parent    2a65f7a157bd4bace970cef73529867b0e0a374d (diff)
parent    5340bee951c18910fd764241945f1f136b5a22b4 (diff)
.
Diffstat (limited to 'megapixels/app/models')
-rw-r--r--  megapixels/app/models/bbox.py          82
-rw-r--r--  megapixels/app/models/data_store.py    67
-rw-r--r--  megapixels/app/models/dataset.py      229
-rw-r--r--  megapixels/app/models/sql_factory.py  224
4 files changed, 580 insertions(+), 22 deletions(-)
diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py
index 41b67416..55a92512 100644
--- a/megapixels/app/models/bbox.py
+++ b/megapixels/app/models/bbox.py
@@ -1,6 +1,9 @@
+import math
+
from dlib import rectangle as dlib_rectangle
import numpy as np
+
class BBoxPoint:
def __init__(self, x, y):
@@ -42,8 +45,12 @@ class BBox:
self._tl = (x1, y1)
self._br = (x2, y2)
self._rect = (self._x1, self._y1, self._x2, self._y2)
+ self._area = self._width * self._height # fraction of image area (coords assumed normalized)
-
+ @property
+ def area(self):
+ return self._area
+
@property
def pt_tl(self):
return self._tl
@@ -105,7 +112,12 @@ class BBox:
# # Utils
# def constrain(self, dim):
-
+ def distance(self, b):
+ dcx = self._cx - b.cx
+ dcy = self._cy - b.cy
+ # hypot keeps float precision (int truncation would zero out normalized coords)
+ return math.hypot(dcx, dcy)
# -----------------------------------------------------------------
# Modify
@@ -117,26 +129,40 @@ class BBox:
:returns (BBox) in pixel dimensions
"""
# expand
- rect_exp = list( (np.array(self._rect) + np.array([-amt, -amt, amt, amt])).astype('int'))
+ r = list( (np.array(self._rect) + np.array([-amt, -amt, amt, amt])).astype('int'))
# outliers
oob = list(range(4))
- oob[0] = min(rect_exp[0], 0)
- oob[1] = min(rect_exp[1], 0)
- oob[2] = dim[0] - max(rect_exp[2], 2)
- oob[3] = dim[1] - max(rect_exp[3], 3)
+ oob[0] = min(r[0], 0)
+ oob[1] = min(r[1], 0)
+ oob[2] = dim[0] - r[2]
+ oob[3] = dim[1] - r[3]
oob = np.array(oob)
oob[oob > 0] = 0
- # amount
+ # absolute amount
oob = np.absolute(oob)
- # threshold
- rect_exp[0] = max(rect_exp[0], 0)
- rect_exp[1] = max(rect_exp[1], 0)
- rect_exp[2] = min(rect_exp[2], dim[0])
- rect_exp[3] = min(rect_exp[3], dim[1])
+ # threshold expanded rectangle
+ r[0] = max(r[0], 0)
+ r[1] = max(r[1], 0)
+ r[2] = min(r[2], dim[0])
+ r[3] = min(r[3], dim[1])
# redistribute oob amounts
oob = np.array([-oob[2], -oob[3], oob[0], oob[1]])
- rect_exp = np.add(np.array(rect_exp), oob)
- return BBox(*rect_exp)
+ r = np.add(np.array(r), oob)
+ # find overage
+ oob[0] = min(r[0], 0)
+ oob[1] = min(r[1], 0)
+ oob[2] = dim[0] - r[2]
+ oob[3] = dim[1] - r[3]
+ oob = np.array(oob)
+ oob[oob > 0] = 0
+ oob = np.absolute(oob)
+ if oob.any():
+ m = np.max(oob)
+ adj = np.array([m, m, -m, -m])
+ # print(adj)
+ r = np.add(np.array(r), adj)
+
+ return BBox(*r)
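+ # Illustrative sketch (hypothetical call, assuming pixel coords and a
+ # 100x100 image): expanding a box that touches the left edge pushes the
+ # out-of-bounds overflow onto the opposite side:
+ #   BBox(0, 20, 50, 70).expand(10, (100, 100)) -> BBox(0, 10, 70, 80)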
# -----------------------------------------------------------------
@@ -156,23 +182,23 @@ class BBox:
# -----------------------------------------------------------------
# Format as
- def as_xyxy(self):
+ def to_xyxy(self):
"""Converts BBox back to x1, y1, x2, y2 rect"""
return (self._x1, self._y1, self._x2, self._y2)
- def as_xywh(self):
+ def to_xywh(self):
"""Converts BBox back to haar type"""
return (self._x1, self._y1, self._width, self._height)
- def as_trbl(self):
+ def to_trbl(self):
"""Converts BBox to CSS (top, right, bottom, left)"""
return (self._y1, self._x2, self._y2, self._x1)
- def as_dlib(self):
+ def to_dlib(self):
"""Converts BBox to dlib rect type"""
- return dlib.rectangle(self._x1, self._y1, self._x2, self._y2)
+ return dlib_rectangle(self._x1, self._y1, self._x2, self._y2)
- def as_yolo(self):
+ def to_yolo(self):
"""Converts BBox to normalized center x, center y, w, h"""
return (self._cx, self._cy, self._width, self._height)
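+ # For example, assuming a pixel-space BBox(10, 20, 110, 220) where width
+ # and height are x2 - x1 and y2 - y1:
+ #   to_xyxy() -> (10, 20, 110, 220)
+ #   to_xywh() -> (10, 20, 100, 200)
+ #   to_trbl() -> (20, 110, 220, 10)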
@@ -199,6 +225,13 @@ class BBox:
return cls(*rect)
@classmethod
+ def from_xyxy(cls, x1, y1, x2, y2):
+ """Converts x1, y1, x2, y2 to BBox
+ same as the constructor but provided for convenience
+ """
+ return cls(x1, y1, x2, y2)
+
+ @classmethod
def from_xywh(cls, x, y, w, h):
"""Converts x1, y1, w, h to BBox
:param rect: (list) x1, y1, w, h
@@ -227,8 +260,13 @@ class BBox:
"""
rect = (rect.left(), rect.top(), rect.right(), rect.bottom())
rect = cls.normalize(cls, rect, dim)
- return cls(*rect)
+ return cls(*rect)
+
+ def __str__(self):
+ return f'BBox: ({self._x1},{self._y1}), ({self._x2}, {self._y2}), width:{self._width}, height:{self._height}'
+ def __repr__(self):
+ return self.__str__()
def str(self):
"""Return BBox as a string "x1, y1, x2, y2" """
diff --git a/megapixels/app/models/data_store.py b/megapixels/app/models/data_store.py
new file mode 100644
index 00000000..7b6bef21
--- /dev/null
+++ b/megapixels/app/models/data_store.py
@@ -0,0 +1,67 @@
+import os
+from os.path import join
+import logging
+
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+# -------------------------------------------------------------------------
+# Metadata and media files
+# -------------------------------------------------------------------------
+
+class DataStore:
+ # local data store
+ def __init__(self, opt_data_store, opt_dataset):
+ self.data_store = join(f'/data_store_{opt_data_store.name.lower()}')
+ self.dir_dataset = join(self.data_store, 'datasets', cfg.DIR_PEOPLE, opt_dataset.name.lower())
+ self.dir_media = join(self.dir_dataset, 'media')
+ self.dir_metadata = join(self.dir_dataset, 'metadata')
+
+ def metadata(self, enum_type):
+ return join(self.dir_metadata, f'{enum_type.name.lower()}.csv')
+
+ def metadata_dir(self):
+ return join(self.dir_metadata)
+
+ def media_images_original(self):
+ return join(self.dir_media, 'original')
+
+ def face(self, subdir, fn, ext):
+ return join(self.dir_media, 'original', subdir, f'{fn}.{ext}')
+
+ def face_crop(self, subdir, fn, ext):
+ return join(self.dir_media, 'cropped', subdir, f'{fn}.{ext}')
+
+ def face_uuid(self, uuid, ext):
+ return join(self.dir_media, 'uuid',f'{uuid}.{ext}')
+
+ def face_crop_uuid(self, uuid, ext):
+ return join(self.dir_media, 'uuid', f'{uuid}.{ext}')
+
+ def uuid_dir(self):
+ return join(self.dir_media, 'uuid')
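+
+ # Usage sketch (hypothetical enum members; the people segment comes from
+ # cfg.DIR_PEOPLE):
+ #   ds = DataStore(types.DataStore.HDD, types.Dataset.MEGAFACE)
+ #   ds.metadata(types.Metadata.IDENTITY)
+ #   # -> /data_store_hdd/datasets/<DIR_PEOPLE>/megaface/metadata/identity.csv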
+
+
+class DataStoreS3:
+ # S3 server
+ def __init__(self, opt_dataset):
+ self._dir_media = join(cfg.S3_HTTP_MEDIA_URL, opt_dataset.name.lower())
+ self._dir_metadata = join(cfg.S3_HTTP_METADATA_URL, opt_dataset.name.lower())
+
+ def metadata(self, opt_metadata_type, ext='csv'):
+ return join(self._dir_metadata, f'{opt_metadata_type.name.lower()}.{ext}')
+
+ def face(self, opt_uuid, ext='jpg'):
+ #return join(self._dir_media, 'original', f'{opt_uuid}.{ext}')
+ return join(self._dir_media, f'{opt_uuid}.{ext}')
+
+ def face_crop(self, opt_uuid, ext='jpg'):
+ # not currently using?
+ return join(self._dir_media, 'cropped', f'{opt_uuid}.{ext}')
+
+
+
+# -------------------------------------------------------------------------
+# Models
+# -------------------------------------------------------------------------
\ No newline at end of file
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
new file mode 100644
index 00000000..eb0109a7
--- /dev/null
+++ b/megapixels/app/models/dataset.py
@@ -0,0 +1,229 @@
+"""
+Dataset model: container for all CSVs about a dataset
+"""
+import os
+import sys
+from os.path import join
+from pathlib import Path
+import logging
+
+import pandas as pd
+import numpy as np
+
+from app.settings import app_cfg as cfg
+from app.settings import types
+from app.models.bbox import BBox
+from app.utils import file_utils, im_utils
+from app.models.data_store import DataStore, DataStoreS3
+from app.utils.logger_utils import Logger
+
+# -------------------------------------------------------------------------
+# Dataset
+# -------------------------------------------------------------------------
+
+class Dataset:
+
+ def __init__(self, opt_data_store, opt_dataset_type):
+ self._dataset_type = opt_dataset_type # enum type
+ self.log = Logger.getLogger()
+ self._metadata = {}
+ self._face_vectors = []
+ self._nullframe = pd.DataFrame() # empty placeholder
+ self.data_store = DataStore(opt_data_store, self._dataset_type)
+ self.data_store_s3 = DataStoreS3(self._dataset_type)
+
+ def load_face_vectors(self):
+ metadata_type = types.Metadata.FACE_VECTOR
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ # convert DataFrame to list of floats
+ self._face_vectors = self.df_vecs_to_dict(self._metadata[metadata_type])
+ self._face_vector_roi_idxs = self.df_vec_roi_idxs_to_dict(self._metadata[metadata_type])
+ self.log.info(f'build face vector dict: {len(self._face_vectors)}')
+ # remove the face vector column, it can be several GB of memory
+ self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit()
+
+ def load_records(self):
+ metadata_type = types.Metadata.FILE_RECORD
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit()
+
+ def load_identities(self):
+ metadata_type = types.Metadata.IDENTITY
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit()
+
+ def metadata(self, opt_metadata_type):
+ return self._metadata.get(opt_metadata_type, None)
+
+ def index_to_record(self, index):
+ # get record meta
+ df_record = self._metadata[types.Metadata.FILE_RECORD]
+ ds_record = df_record.iloc[index]
+ identity_index = ds_record.identity_index
+ # get identity meta
+ df_identity = self._metadata[types.Metadata.IDENTITY]
+ # future datasets can have multiple identities per image
+ ds_identities = df_identity.iloc[identity_index]
+ # get filepath and S3 url
+ fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ s3_url = self.data_store_s3.face(ds_record.uuid)
+ image_record = ImageRecord(ds_record, fp_im, s3_url, ds_identities=ds_identities)
+ return image_record
+
+ def vector_to_record(self, vector_index):
+ '''Accumulates image and its metadata'''
+ df_face_vector = self._metadata[types.Metadata.FACE_VECTOR]
+ ds_face_vector = df_face_vector.iloc[vector_index]
+ # get the match's ROI index
+ image_index = ds_face_vector.image_index
+ # get the roi dataframe
+ df_face_roi = self._metadata[types.Metadata.FACE_ROI]
+ ds_roi = df_face_roi.iloc[image_index]
+ # create BBox
+ dim = (ds_roi.image_width, ds_roi.image_height)
+ bbox = BBox.from_xywh_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h, dim)
+ # use the ROI index to get identity index from the identity DataFrame
+ df_sha256 = self._metadata[types.Metadata.SHA256]
+ ds_sha256 = df_sha256.iloc[image_index]
+ sha256 = ds_sha256.sha256
+ # get the local filepath
+ df_filepath = self._metadata[types.Metadata.FILEPATH]
+ ds_file = df_filepath.iloc[image_index]
+ fp_im = self.data_store.face(ds_file.subdir, ds_file.fn, ds_file.ext)
+ # get remote path
+ df_uuid = self._metadata[types.Metadata.UUID]
+ ds_uuid = df_uuid.iloc[image_index]
+ uuid = ds_uuid.uuid
+ fp_url = self.data_store_s3.face(uuid)
+ fp_url_crop = self.data_store_s3.face_crop(uuid)
+
+ image_record = ImageRecord(image_index, sha256, uuid, bbox, fp_im, fp_url)
+ # now get the identity index (if available)
+ identity_index = ds_sha256.identity_index
+ if identity_index > -1:
+ # then use the identity index to get the identity meta
+ df_identity = self._metadata[types.Metadata.IDENTITY]
+ ds_identity = df_identity.iloc[identity_index]
+ # get the name and description
+ name = ds_identity.fullname
+ desc = ds_identity.description
+ gender = ds_identity.gender
+ n_images = ds_identity.images
+ url = '(url)' # TODO
+ age = '(age)' # TODO
+ nationality = '(nationality)'
+ identity = Identity(identity_index, name=name, desc=desc, gender=gender, n_images=n_images,
+ url=url, age=age, nationality=nationality)
+ image_record.identity = identity
+ else:
+ self.log.info(f'no identity index: {ds_sha256}')
+
+ return image_record
+
+
+ def find_matches(self, query_vec, n_results=5, threshold=0.6):
+ image_records = [] # list of image matches w/identity if available
+ # find most similar feature vectors indexes
+ #match_idxs = self.similar(query_vec, n_results, threshold)
+ sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+ match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
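+ # note: argpartition returns the n_results nearest indices in arbitrary
+ # order; sort by distance if ranked results are needed, e.g. (sketch):
+ #   match_idxs = match_idxs[np.argsort(sim_scores[match_idxs])]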
+
+ for match_idx in match_idxs:
+ # get the corresponding face vector row
+ roi_index = self._face_vector_roi_idxs[match_idx]
+ df_record = self._metadata[types.Metadata.FILE_RECORD]
+ ds_record = df_record.iloc[roi_index]
+ self.log.debug(f'find match index: {match_idx}, --> roi_index: {roi_index}')
+ fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ s3_url = self.data_store_s3.face(ds_record.uuid)
+ image_record = ImageRecord(ds_record, fp_im, s3_url)
+ #roi_index = self._face_vector_roi_idxs[match_idx]
+ #image_record = self.roi_idx_to_record(roi_index)
+ image_records.append(image_record)
+ return image_records
+
+ # ----------------------------------------------------------------------
+ # utilities
+
+ def df_vecs_to_dict(self, df):
+ # convert the DataFrame CSV to float list of vecs
+ return [list(map(float,x.vec.split(','))) for x in df.itertuples()]
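+ # e.g. a row whose 'vec' field is the quoted string
+ # "-0.1082,0.0634,...,0.0241" (hypothetical values) becomes
+ # [-0.1082, 0.0634, ..., 0.0241], one 128-float encoding per face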
+
+ def df_vec_roi_idxs_to_dict(self, df):
+ # collect the roi_index of each face vector row
+ return [x.roi_index for x in df.itertuples()]
+
+ def similar(self, query_vec, n_results):
+ '''Finds most similar N indices of query face vector
+ :query_vec: (list) of 128 floating point numbers of face encoding
+ :n_results: (int) number of most similar indices to return
+ :returns (list) of (int) indices
+ '''
+ # L2 distance, based on the ageitgey/face_recognition approach
+ dists = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+ top_idxs = list(np.argpartition(dists, n_results)[:n_results])
+ return top_idxs
+
+
+
+class ImageRecord:
+
+ def __init__(self, ds_record, fp, url, ds_rois=None, ds_identities=None):
+ # more metadata may be added here later
+ self.image_index = ds_record.name # row label (the CSV 'index' column)
+ self.sha256 = ds_record.sha256
+ self.uuid = ds_record.uuid
+ self.filepath = fp
+ self.url = url
+ self._identities = []
+ # image records contain ROIs
+ # ROIs are linked to identities
+
+ #self._identities = [Identity(x) for x in ds_identities]
+
+ @property
+ def identity(self):
+ return self._identities[0] if self._identities else None
+ @identity.setter
+ def identity(self, value):
+ self._identities.append(value)
+
+ def summarize(self):
+ '''Summarizes data for debugging'''
+ log = Logger.getLogger()
+ log.info(f'filepath: {self.filepath}')
+ log.info(f'sha256: {self.sha256}')
+ log.info(f'UUID: {self.uuid}')
+ log.info(f'S3 url: {self.url}')
+ for identity in self._identities:
+ log.info(f'fullname: {identity.fullname}')
+ log.info(f'description: {identity.description}')
+ log.info(f'gender: {identity.gender}')
+ log.info(f'images: {identity.n_images}')
+
+
+class Identity:
+
+ def __init__(self, idx, name='NA', desc='NA', gender='NA', n_images=1,
+ url='NA', age='NA', nationality='NA'):
+ self.index = idx
+ self.name = name
+ self.description = desc
+ self.gender = gender
+ self.n_images = n_images
+ self.url = url
+ self.age = age
+ self.nationality = nationality
diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py
new file mode 100644
index 00000000..da95b539
--- /dev/null
+++ b/megapixels/app/models/sql_factory.py
@@ -0,0 +1,224 @@
+import os
+import glob
+import time
+import pandas as pd
+
+from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+
+from app.utils.file_utils import load_recipe, load_csv_safe
+from app.settings import app_cfg as cfg
+
+connection_url = "mysql+mysqlconnector://{}:{}@{}/{}?charset=utf8mb4".format(
+ os.getenv("DB_USER"),
+ os.getenv("DB_PASS"),
+ os.getenv("DB_HOST"),
+ os.getenv("DB_NAME")
+)
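+# e.g. with DB_USER=mp, DB_PASS=secret, DB_HOST=localhost, DB_NAME=megapixels
+# (hypothetical values) this yields:
+#   mysql+mysqlconnector://mp:secret@localhost/megapixels?charset=utf8mb4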
+
+datasets = {}
+loaded = False
+Session = None
+
+def list_datasets():
+ return [dataset.describe() for dataset in datasets.values()]
+
+def get_dataset(name):
+ return datasets[name] if name in datasets else None
+
+def get_table(name, table_name):
+ dataset = get_dataset(name)
+ return dataset.get_table(table_name) if dataset else None
+
+def load_sql_datasets(replace=False, base_model=None):
+ global datasets, loaded, Session
+ if loaded:
+ return datasets
+ engine = create_engine(connection_url, encoding="utf-8")
+ # db.set_character_set('utf8')
+ # dbc = db.cursor()
+ # dbc.execute('SET NAMES utf8;')
+ # dbc.execute('SET CHARACTER SET utf8;')
+ # dbc.execute('SET character_set_connection=utf8;')
+ Session = sessionmaker(bind=engine)
+ for path in glob.iglob(os.path.join(cfg.DIR_FAISS_METADATA, "*")):
+ dataset = load_sql_dataset(path, replace, engine, base_model)
+ datasets[dataset.name] = dataset
+ loaded = True
+ return datasets
+
+def load_sql_dataset(path, replace=False, engine=None, base_model=None):
+ name = os.path.basename(path)
+ dataset = SqlDataset(name, base_model=base_model)
+
+ for fn in glob.iglob(os.path.join(path, "*.csv")):
+ key = os.path.basename(fn).replace(".csv", "")
+ table = dataset.get_table(key)
+ if table is None:
+ continue
+ if replace:
+ print('loading dataset {}'.format(fn))
+ df = pd.read_csv(fn)
+ # fix columns that are named "index", a sql reserved word
+ df.columns = table.__table__.columns.keys()
+ df.to_sql(name=table.__tablename__, con=engine, if_exists='replace', index=False)
+ return dataset
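+
+# Usage sketch (hypothetical dataset name; assumes cfg.DIR_FAISS_METADATA holds
+# one subdirectory per dataset with uuids.csv, roi.csv, identity_meta.csv and
+# pose.csv inside):
+#   datasets = load_sql_datasets(replace=True)
+#   lfw = get_dataset('lfw')
+#   identity = lfw.get_identity(42) if lfw else None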
+
+class SqlDataset:
+ """
+ Bridge between the facial information CSVs connected to the datasets, and MySQL
+ - each dataset should have files that can be loaded into these database models
+ - names will be fixed to work in SQL (index -> id)
+ - we can then have more generic models for fetching this info after doing a FAISS query
+ """
+ def __init__(self, name, engine=None, base_model=None):
+ self.name = name
+ self.tables = {}
+ if base_model is None:
+ self.engine = engine or create_engine(connection_url)
+ base_model = declarative_base(bind=self.engine)
+ self.base_model = base_model
+
+ def describe(self):
+ return {
+ 'name': self.name,
+ 'tables': list(self.tables.keys()),
+ }
+
+ def get_identity(self, id):
+ table = self.get_table('identity_meta')
+ # id += 1
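+ # identity_meta keeps one row per identity, keyed by the image_id of its
+ # first image, so filtering image_id <= id and taking the first row in
+ # descending order maps an image id back to its owning identity
+ # (e.g. ids 0-11 resolve to the identity at image_id 0, if it has 12 images)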
+ identity = table.query.filter(table.image_id <= id).order_by(table.image_id.desc()).first().toJSON()
+ return {
+ 'uuid': self.select('uuids', id),
+ 'identity': identity,
+ 'roi': self.select('roi', id),
+ 'pose': self.select('pose', id),
+ }
+
+ def search_name(self, q):
+ table = self.get_table('identity_meta')
+ uuid_table = self.get_table('uuids')
+
+ identity = table.query.filter(table.fullname.like(q)).order_by(table.fullname.desc()).limit(30)
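+ # note: q is passed to LIKE unmodified, so callers supply their own
+ # wildcards, e.g. search_name('%A. J. Cook%') using the sample
+ # identity.csv row shown below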
+ identities = []
+ for row in identity:
+ uuid = uuid_table.query.filter(uuid_table.id == row.image_id).first()
+ identities.append({
+ 'uuid': uuid.toJSON(),
+ 'identity': row.toJSON(),
+ })
+ return identities
+
+ def select(self, table, id):
+ table = self.get_table(table)
+ if not table:
+ return None
+ session = Session()
+ # for obj in session.query(table).filter_by(id=id):
+ # print(table)
+ obj = session.query(table).filter(table.id == id).first()
+ session.close()
+ return obj.toJSON()
+
+ def get_table(self, type):
+ if type in self.tables:
+ return self.tables[type]
+ elif type == 'uuids':
+ self.tables[type] = self.uuid_table()
+ elif type == 'roi':
+ self.tables[type] = self.roi_table()
+ elif type == 'identity_meta':
+ self.tables[type] = self.identity_table()
+ elif type == 'pose':
+ self.tables[type] = self.pose_table()
+ else:
+ return None
+ return self.tables[type]
+
+ # ==> uuids.csv <==
+ # index,uuid
+ # 0,f03fd921-2d56-4e83-8115-f658d6a72287
+ def uuid_table(self):
+ class UUID(self.base_model):
+ __tablename__ = self.name + "_uuid"
+ id = Column(Integer, primary_key=True)
+ uuid = Column(String(36, convert_unicode=True), nullable=False)
+ def toJSON(self):
+ return {
+ 'id': self.id,
+ 'uuid': self.uuid,
+ }
+ return UUID
+
+ # ==> roi.csv <==
+ # index,h,image_height,image_index,image_width,w,x,y
+ # 0,0.33000000000000007,250,0,250,0.32999999999999996,0.33666666666666667,0.35
+ def roi_table(self):
+ class ROI(self.base_model):
+ __tablename__ = self.name + "_roi"
+ id = Column(Integer, primary_key=True)
+ h = Column(Float, nullable=False)
+ image_height = Column(Integer, nullable=False)
+ image_index = Column(Integer, nullable=False)
+ image_width = Column(Integer, nullable=False)
+ w = Column(Float, nullable=False)
+ x = Column(Float, nullable=False)
+ y = Column(Float, nullable=False)
+ def toJSON(self):
+ return {
+ 'id': self.id,
+ 'image_index': self.image_index,
+ 'image_height': self.image_height,
+ 'image_width': self.image_width,
+ 'w': self.w,
+ 'h': self.h,
+ 'x': self.x,
+ 'y': self.y,
+ }
+ return ROI
+
+ # ==> identity.csv <==
+ # index,fullname,description,gender,images,image_index
+ # 0,A. J. Cook,Canadian actress,f,1,0
+ def identity_table(self):
+ class Identity(self.base_model):
+ __tablename__ = self.name + "_identity"
+ id = Column(Integer, primary_key=True)
+ fullname = Column(String(36, convert_unicode=True), nullable=False)
+ description = Column(String(36, convert_unicode=True), nullable=False)
+ gender = Column(String(1, convert_unicode=True), nullable=False)
+ images = Column(Integer, nullable=False)
+ image_id = Column(Integer, nullable=False)
+ def toJSON(self):
+ return {
+ 'id': self.id,
+ 'image_id': self.image_id,
+ 'fullname': self.fullname,
+ 'images': self.images,
+ 'gender': self.gender,
+ 'description': self.description,
+ }
+ return Identity
+
+ # ==> pose.csv <==
+ # index,image_index,pitch,roll,yaw
+ # 0,0,11.16264458441435,10.415885631337728,22.99719032415318
+ def pose_table(self):
+ class Pose(self.base_model):
+ __tablename__ = self.name + "_pose"
+ id = Column(Integer, primary_key=True)
+ image_id = Column(Integer, primary_key=True)
+ pitch = Column(Float, nullable=False)
+ roll = Column(Float, nullable=False)
+ yaw = Column(Float, nullable=False)
+ def toJSON(self):
+ return {
+ 'id': self.id,
+ 'image_id': self.image_id,
+ 'pitch': self.pitch,
+ 'roll': self.roll,
+ 'yaw': self.yaw,
+ }
+ return Pose
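+
+# toJSON() sketch for the sample pose row above (the CSV 'index' column is
+# renamed to 'id' on load):
+#   {'id': 0, 'image_id': 0, 'pitch': 11.1626..., 'roll': 10.4158..., 'yaw': 22.9971...}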