summaryrefslogtreecommitdiff
path: root/megapixels/commands/datasets/preproc_wiki_imdb.py
blob: 66680ed0cd2cd9278ba7c86b9e7056201334f9b1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
from glob import glob
import os
from os.path import join
from pathlib import Path

import click

from app.settings import types
from app.utils import click_utils
from app.settings import app_cfg as cfg
from app.utils import logger_utils

import dlib
import pandas as pd
from PIL import Image, ImageOps, ImageFilter
from app.utils import file_utils, im_utils


log = logger_utils.Logger.getLogger()

@click.command()
@click.option('-i', '--input', 'opt_fp_in', required=True,
  help='Input directory')
@click.option('-o', '--output', 'opt_fp_out',
  help='Output directory')
@click.option('--videos', 'opt_dir_videos',
  help='Videos directory')
@click.option('--action', 'opt_action', 
  type=click.Choice(['info', 'faces', 'rename', 'download', 'metadata', 'split_frames']),
  default='info',
  help='Command action')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_videos, opt_action):
  """YTMU utils: dispatch one dataset preprocessing action.

  Routes to the matching handle_* function based on --action.
  """

  # -------------------------------------------------
  # process

  if opt_action == 'metadata':
    # downloads video metadata with ytdl
    handle_metadata(opt_fp_in, opt_fp_out)
  elif opt_action == 'download':
    # downloads video files with ytdl
    handle_download(opt_fp_in, opt_fp_out)
  elif opt_action == 'info':
    # converts original data file to clean CSV
    # FIX: handle_info requires (fp_in, fp_out); it was called with no args,
    # which raised a TypeError at runtime
    handle_info(opt_fp_in, opt_fp_out)
  elif opt_action == 'rename':
    # rename the videos to video ID
    handle_rename(opt_fp_in, opt_fp_out, opt_dir_videos)
  elif opt_action == 'split_frames':
    # extract frames containing faces from the videos
    handle_split_frames(opt_fp_in, opt_fp_out, opt_dir_videos)
  # NOTE(review): the 'faces' choice is accepted by --action but has no
  # handler branch — it currently does nothing; confirm intent
  



# ----------------------------------------------------
# handlers

def handle_split_frames(fp_in, dir_out, dir_videos):
  """Scan videos for frames containing faces and save them as JPEGs.

  Walks every .mp4/.webm/.mkv in dir_videos, runs a DLIB CNN face
  detector on each frame, and writes a frame to dir_out at most once
  every `face_interval` frames when at least one face is present.
  Output name: <video stem>_<zero-padded global frame count>.jpg

  :param fp_in: unused here (kept for the common handler signature)
  :param dir_out: directory to write extracted frames into
  :param dir_videos: directory containing the source videos
  """
  if not dir_out or not dir_videos:
    log.error('-o/--output and --videos required')
    return
  import cv2 as cv
  from tqdm import tqdm
  from app.processors import face_detector
  detector = face_detector.DetectorDLIBCNN()

  # get file list
  fp_videos = glob(join(dir_videos, '*.mp4'))
  fp_videos += glob(join(dir_videos, '*.webm'))
  fp_videos += glob(join(dir_videos, '*.mkv'))
  face_interval = 30  # min frames between two saved face frames
  frame_interval_count = 0
  frame_count = 0  # global across all videos so filenames never collide

  file_utils.mkdirs(dir_out)

  for fp_video in tqdm(fp_videos):
    video = cv.VideoCapture(fp_video)
    try:
      while video.isOpened():
        res, frame = video.read()
        if not res:
          break

        frame_count += 1  # for naming
        frame_interval_count += 1  # for interval
        bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0)
        if len(bboxes) > 0 and frame_interval_count >= face_interval:
          # save frame
          fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count)))
          cv.imwrite(fp_frame, frame)
          frame_interval_count = 0
    finally:
      # FIX: capture handle was never released; leaks OS resources when
      # iterating many videos
      video.release()


def handle_metadata(fp_in, fp_out):
  """Fetch YouTube metadata for each video in the input CSV and write a CSV.

  Reads a CSV with 'url' and 'id' columns, queries youtube-dl for each
  video's info dict (without downloading), keeps the fields in `keys`,
  and writes the collected rows to fp_out. Commas inside string values
  are replaced with ';' to keep the CSV well-formed.

  :param fp_in: path to input CSV with 'url' and 'id' columns
  :param fp_out: path to output CSV
  """
  # metadata fields to pull out of youtube-dl's info dict
  keys = ['description', 'average_rating', 'dislike_count', 'categories', 
  'thumbnail', 'title', 'upload_date', 'uploader_url', 'uploader_id',
  'fps', 'height', 'width', 'like_count', 'license', 'tags']

  import youtube_dl

  ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})

  df = pd.read_csv(fp_in)
  data_exp = []

  for i, row in df.iterrows():
    video_data = {'url': row['url'], 'id': row['id']}
    try:
      with ydl:
        url = 'http://www.youtube.com/watch?v={}'.format(row['id'])
        result = ydl.extract_info(url, download=False)
      # playlists come back wrapped in 'entries'; take the first entry
      video = result['entries'][0] if 'entries' in result else result
      for k in keys:
        val = video[k]
        if k == 'title':
          log.debug(val)
        if isinstance(val, list):
          val = '; '.join(val)
        if isinstance(val, str):
          video_data[k] = val.replace(',', ';')
        elif val is not None:
          # FIX: numeric fields (fps, height, width, like_count, ratings)
          # were silently dropped because only str values were stored
          video_data[k] = val
    except Exception as e:
      log.warn('video unavailable: {}'.format(row['url']))
      log.error(e)
      continue
    data_exp.append(video_data)

  df_exp = pd.DataFrame.from_dict(data_exp)
  df_exp.to_csv(fp_out)


def handle_download(fp_in, dir_out):
  """Download videos listed in the input CSV that are not yet in dir_out.

  A video is considered already downloaded when any existing
  .mp4/.webm/.mkv filename in dir_out contains its ID.

  :param fp_in: path to CSV with an 'id' column of YouTube video IDs
  :param dir_out: directory holding already-downloaded videos
  """
  import youtube_dl
  df = pd.read_csv(fp_in)
  # videos already present locally (any container format)
  fp_videos = glob(join(dir_out, '*.mp4'))
  fp_videos += glob(join(dir_out, '*.webm'))
  fp_videos += glob(join(dir_out, '*.mkv'))

  ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})

  for i, row in df.iterrows():
    vid = row['id']
    # skip IDs that already match an existing file
    if any(vid in fp_video for fp_video in fp_videos):
      log.debug('skip: {}'.format(vid))
      continue
    try:
      with ydl:
        ydl.download(['http://www.youtube.com/watch?v={}'.format(vid)])
    except Exception as e:
      # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
      # narrowed and the actual error is now logged
      log.error('could not dl: {}'.format(vid))
      log.error(e)


def handle_info(fp_in, fp_out):
  """Parse YouTube watch URLs into (url, id) rows and write a clean CSV.

  Extracts the value of the 'v=' query parameter from each URL in the
  input text file; URLs without one are logged and skipped.

  :param fp_in: path to a text file with one YouTube URL per line
  :param fp_out: path to output CSV (required)
  """
  if not fp_out:
    log.error('--output required')
    return
  urls = file_utils.load_text(fp_in)
  videos = []
  for url in urls:
    splits = url.split('v=')
    try:
      vid = splits[1]
      vid = vid.split('&')[0]  # strip trailing query params
      videos.append({'url': url, 'id': vid})
    except IndexError:
      # FIX: narrowed bare `except` to the only expected failure
      log.warn('no video id for {}'.format(url))
  # convert to df
  df = pd.DataFrame.from_dict(videos)
  # FIX: was df.to_csv(opt_fp_out) — NameError, opt_fp_out is not defined
  # in this function's scope
  df.to_csv(fp_out)

  
def handle_rename(fp_in, fp_out, dir_videos):
  """Rename downloaded videos so each filename is exactly its video ID.

  For every ID in the input CSV, finds the first video file in
  dir_videos whose name contains the ID and moves it to
  '<id><original suffix>' in the same directory. Each file is matched
  at most once.

  :param fp_in: path to CSV with an 'id' column
  :param fp_out: unused (kept for the common handler signature)
  :param dir_videos: directory containing the video files
  """
  import shutil

  if not dir_videos:
    log.error('--videos required')
    return

  fp_videos = glob(join(dir_videos, '*.mp4'))
  fp_videos += glob(join(dir_videos, '*.webm'))
  fp_videos += glob(join(dir_videos, '*.mkv'))

  df = pd.read_csv(fp_in)

  for i, row in df.iterrows():
    vid = row['id']
    # FIX: removed dead `fp_videos_copy = fp_videos.copy()` built (and
    # discarded) on every iteration; the loop already breaks right after
    # removing the matched entry, so mutating fp_videos here is safe
    for fp_video in fp_videos:
      if vid in fp_video:
        dst = join(dir_videos, '{}{}'.format(vid, Path(fp_video).suffix))
        shutil.move(fp_video, dst)
        log.debug('move {} to {}'.format(fp_video, dst))
        fp_videos.remove(fp_video)  # don't match this file again
        break