path: root/check
author    Jules Laplace <julescarbon@gmail.com>  2019-04-27 21:00:32 +0200
committer Jules Laplace <julescarbon@gmail.com>  2019-04-27 21:00:32 +0200
commit    838508a639c4ff6bc2805b8b1552e9a90f3fe63d (patch)
tree      8646585a1a9fa09aba61ee2ec78d8092480f8b23 /check
parent    60e19825c2e8aef910b701bf2ca060d7fcbac78a (diff)
comment out all opencv stuff
Diffstat (limited to 'check')
-rw-r--r--  check/app/utils/im_utils.py  280
1 file changed, 140 insertions(+), 140 deletions(-)
diff --git a/check/app/utils/im_utils.py b/check/app/utils/im_utils.py
index 1d1affb..6b75bc3 100644
--- a/check/app/utils/im_utils.py
+++ b/check/app/utils/im_utils.py
@@ -1,7 +1,7 @@
import sys
import os
from os.path import join
-import cv2 as cv
+# import cv2 as cv
import imagehash
from PIL import Image, ImageDraw, ImageFilter, ImageOps
from skimage.filters.rank import entropy
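
Review note: if cv2 ever needs to come back as an optional rather than hard dependency, a guarded import is a lighter-weight alternative to commenting the line out. A minimal sketch, not part of this commit (the HAS_CV2 flag is a made-up name):

# Optional-import sketch; HAS_CV2 is a hypothetical flag, not used elsewhere in this module.
try:
    import cv2 as cv
    HAS_CV2 = True
except ImportError:
    cv = None
    HAS_CV2 = False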
@@ -64,30 +64,30 @@ def compute_features(fe,frames,phashes,phash_thresh=1):
return vals
-def np2pil(im, swap=True):
- """Ensure image is Pillow format
- :param im: image in numpy or PIL.Image format
- :returns: image in Pillow RGB format
- """
- try:
- im.verify()
- return im
- except:
- if swap:
- im = cv.cvtColor(im,cv.COLOR_BGR2RGB)
- return Image.fromarray(im.astype('uint8'), 'RGB')
+# def np2pil(im, swap=True):
+# """Ensure image is Pillow format
+# :param im: image in numpy or PIL.Image format
+# :returns: image in Pillow RGB format
+# """
+# try:
+# im.verify()
+# return im
+# except:
+# if swap:
+# im = cv.cvtColor(im,cv.COLOR_BGR2RGB)
+# return Image.fromarray(im.astype('uint8'), 'RGB')
-def pil2np(im, swap=True):
- """Ensure image is Numpy.ndarry format
- :param im: image in numpy or PIL.Image format
- :returns: image in Numpy uint8 format
- """
- if type(im) == np.ndarray:
- return im
- im = np.asarray(im, np.uint8)
- if swap:
- im = cv.cvtColor(im, cv.COLOR_RGB2BGR)
- return im
+# def pil2np(im, swap=True):
+# """Ensure image is Numpy.ndarry format
+# :param im: image in numpy or PIL.Image format
+# :returns: image in Numpy uint8 format
+# """
+# if type(im) == np.ndarray:
+# return im
+# im = np.asarray(im, np.uint8)
+# if swap:
+# im = cv.cvtColor(im, cv.COLOR_RGB2BGR)
+# return im
def resize(im, width=0, height=0):
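
Review note: the only cv2 call inside np2pil/pil2np was the BGR/RGB channel swap, which plain numpy slicing can do. A rough, untested sketch of cv2-free replacements (same names as the commented-out helpers, but these are not part of this commit):

import numpy as np
from PIL import Image

def np2pil(im, swap=True):
    """Ensure image is Pillow RGB format; swap reverses BGR -> RGB first."""
    if isinstance(im, Image.Image):
        return im
    if swap:
        im = im[:, :, ::-1]  # reverse the channel axis: BGR -> RGB
    return Image.fromarray(im.astype('uint8'), 'RGB')

def pil2np(im, swap=True):
    """Ensure image is a numpy uint8 array; swap reverses RGB -> BGR."""
    if isinstance(im, np.ndarray):
        return im
    im = np.asarray(im, np.uint8)
    if swap:
        im = im[:, :, ::-1]  # reverse the channel axis: RGB -> BGR
    return im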
@@ -150,39 +150,39 @@ def filter_pixellate(im,num_cells):
# Utilities for analyzing frames
-def compute_gray(im):
- im = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
- n_vals = float(im.shape[0] * im.shape[1])
- avg = np.sum(im[:]) / n_vals
- return avg
+# def compute_gray(im):
+# im = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
+# n_vals = float(im.shape[0] * im.shape[1])
+# avg = np.sum(im[:]) / n_vals
+# return avg
-def compute_rgb(im):
- im = cv.cvtColor(im,cv.COLOR_BGR2RGB)
- n_vals = float(im.shape[0] * im.shape[1])
- avg_r = np.sum(im[:,:,0]) / n_vals
- avg_g = np.sum(im[:,:,1]) / n_vals
- avg_b = np.sum(im[:,:,2]) / n_vals
- avg_rgb = np.sum(im[:,:,:]) / (n_vals * 3.0)
- return avg_r, avg_b, avg_g, avg_rgb
+# def compute_rgb(im):
+# im = cv.cvtColor(im,cv.COLOR_BGR2RGB)
+# n_vals = float(im.shape[0] * im.shape[1])
+# avg_r = np.sum(im[:,:,0]) / n_vals
+# avg_g = np.sum(im[:,:,1]) / n_vals
+# avg_b = np.sum(im[:,:,2]) / n_vals
+# avg_rgb = np.sum(im[:,:,:]) / (n_vals * 3.0)
+# return avg_r, avg_b, avg_g, avg_rgb
-def compute_hsv(im):
- im = cv.cvtColor(im,cv.COLOR_BGR2HSV)
- n_vals = float(im.shape[0] * im.shape[1])
- avg_h = np.sum(frame[:,:,0]) / n_vals
- avg_s = np.sum(frame[:,:,1]) / n_vals
- avg_v = np.sum(frame[:,:,2]) / n_vals
- avg_hsv = np.sum(frame[:,:,:]) / (n_vals * 3.0)
- return avg_h, avg_s, avg_v, avg_hsv
+# def compute_hsv(im):
+# im = cv.cvtColor(im,cv.COLOR_BGR2HSV)
+# n_vals = float(im.shape[0] * im.shape[1])
+# avg_h = np.sum(frame[:,:,0]) / n_vals
+# avg_s = np.sum(frame[:,:,1]) / n_vals
+# avg_v = np.sum(frame[:,:,2]) / n_vals
+# avg_hsv = np.sum(frame[:,:,:]) / (n_vals * 3.0)
+# return avg_h, avg_s, avg_v, avg_hsv
-def pys_dhash(im, hashSize=8):
- # resize the input image, adding a single column (width) so we
- # can compute the horizontal gradient
- resized = cv.resize(im, (hashSize + 1, hashSize))
- # compute the (relative) horizontal gradient between adjacent
- # column pixels
- diff = resized[:, 1:] > resized[:, :-1]
- # convert the difference image to a hash
- return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])
+# def pys_dhash(im, hashSize=8):
+# # resize the input image, adding a single column (width) so we
+# # can compute the horizontal gradient
+# resized = cv.resize(im, (hashSize + 1, hashSize))
+# # compute the (relative) horizontal gradient between adjacent
+# # column pixels
+# diff = resized[:, 1:] > resized[:, :-1]
+# # convert the difference image to a hash
+# return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])
############################################
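
Review note: the dropped pys_dhash largely duplicates the imagehash package this module already imports, and the per-channel averages only need numpy. An untested sketch, assuming frames arrive as HxWx3 RGB uint8 arrays (the original functions took BGR):

import numpy as np
import imagehash
from PIL import Image

def compute_gray(im):
    # mean luma of an RGB frame (ITU-R BT.601 weights)
    gray = np.dot(im[..., :3].astype(np.float64), [0.299, 0.587, 0.114])
    return float(gray.mean())

def compute_rgb(im):
    avg_r = float(im[:, :, 0].mean())
    avg_g = float(im[:, :, 1].mean())
    avg_b = float(im[:, :, 2].mean())
    return avg_r, avg_g, avg_b, float(im[:, :, :3].mean())

def dhash(im, hash_size=8):
    # imagehash.dhash computes the same horizontal-gradient difference hash,
    # returned as an ImageHash object rather than an int
    return imagehash.dhash(Image.fromarray(im.astype('uint8')), hash_size=hash_size)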
@@ -283,62 +283,62 @@ def compute_entropy(im):
# OpenCV
############################################
-def bgr2gray(im):
- """Wrapper for cv2.cvtColor transform
- :param im: Numpy.ndarray (BGR)
- :returns: Numpy.ndarray (Gray)
- """
- return cv.cvtColor(im,cv.COLOR_BGR2GRAY)
+# def bgr2gray(im):
+# """Wrapper for cv2.cvtColor transform
+# :param im: Numpy.ndarray (BGR)
+# :returns: Numpy.ndarray (Gray)
+# """
+# return cv.cvtColor(im,cv.COLOR_BGR2GRAY)
-def gray2bgr(im):
- """Wrapper for cv2.cvtColor transform
- :param im: Numpy.ndarray (Gray)
- :returns: Numpy.ndarray (BGR)
- """
- return cv.cvtColor(im,cv.COLOR_GRAY2BGR)
+# def gray2bgr(im):
+# """Wrapper for cv2.cvtColor transform
+# :param im: Numpy.ndarray (Gray)
+# :returns: Numpy.ndarray (BGR)
+# """
+# return cv.cvtColor(im,cv.COLOR_GRAY2BGR)
-def bgr2rgb(im):
- """Wrapper for cv2.cvtColor transform
- :param im: Numpy.ndarray (BGR)
- :returns: Numpy.ndarray (RGB)
- """
- return cv.cvtColor(im,cv.COLOR_BGR2RGB)
+# def bgr2rgb(im):
+# """Wrapper for cv2.cvtColor transform
+# :param im: Numpy.ndarray (BGR)
+# :returns: Numpy.ndarray (RGB)
+# """
+# return cv.cvtColor(im,cv.COLOR_BGR2RGB)
-def compute_laplacian(im):
- # below 100 is usually blurry
- return cv.Laplacian(im, cv.CV_64F).var()
+# def compute_laplacian(im):
+# # below 100 is usually blurry
+# return cv.Laplacian(im, cv.CV_64F).var()
-# http://radjkarl.github.io/imgProcessor/index.html#
+# # http://radjkarl.github.io/imgProcessor/index.html#
-def modifiedLaplacian(img):
- ''''LAPM' algorithm (Nayar89)'''
- M = np.array([-1, 2, -1])
- G = cv.getGaussianKernel(ksize=3, sigma=-1)
- Lx = cv.sepFilter2D(src=img, ddepth=cv.CV_64F, kernelX=M, kernelY=G)
- Ly = cv.sepFilter2D(src=img, ddepth=cv.CV_64F, kernelX=G, kernelY=M)
- FM = np.abs(Lx) + np.abs(Ly)
- return cv.mean(FM)[0]
+# def modifiedLaplacian(img):
+# ''''LAPM' algorithm (Nayar89)'''
+# M = np.array([-1, 2, -1])
+# G = cv.getGaussianKernel(ksize=3, sigma=-1)
+# Lx = cv.sepFilter2D(src=img, ddepth=cv.CV_64F, kernelX=M, kernelY=G)
+# Ly = cv.sepFilter2D(src=img, ddepth=cv.CV_64F, kernelX=G, kernelY=M)
+# FM = np.abs(Lx) + np.abs(Ly)
+# return cv.mean(FM)[0]
-def varianceOfLaplacian(img):
- ''''LAPV' algorithm (Pech2000)'''
- lap = cv.Laplacian(img, ddepth=-1)#cv.cv.CV_64F)
- stdev = cv.meanStdDev(lap)[1]
- s = stdev[0]**2
- return s[0]
+# def varianceOfLaplacian(img):
+# ''''LAPV' algorithm (Pech2000)'''
+# lap = cv.Laplacian(img, ddepth=-1)#cv.cv.CV_64F)
+# stdev = cv.meanStdDev(lap)[1]
+# s = stdev[0]**2
+# return s[0]
-def tenengrad(img, ksize=3):
- ''''TENG' algorithm (Krotkov86)'''
- Gx = cv.Sobel(img, ddepth=cv.CV_64F, dx=1, dy=0, ksize=ksize)
- Gy = cv.Sobel(img, ddepth=cv.CV_64F, dx=0, dy=1, ksize=ksize)
- FM = Gx**2 + Gy**2
- return cv.mean(FM)[0]
+# def tenengrad(img, ksize=3):
+# ''''TENG' algorithm (Krotkov86)'''
+# Gx = cv.Sobel(img, ddepth=cv.CV_64F, dx=1, dy=0, ksize=ksize)
+# Gy = cv.Sobel(img, ddepth=cv.CV_64F, dx=0, dy=1, ksize=ksize)
+# FM = Gx**2 + Gy**2
+# return cv.mean(FM)[0]
-def normalizedGraylevelVariance(img):
- ''''GLVN' algorithm (Santos97)'''
- mean, stdev = cv.meanStdDev(img)
- s = stdev[0]**2 / mean[0]
- return s[0]
+# def normalizedGraylevelVariance(img):
+# ''''GLVN' algorithm (Santos97)'''
+# mean, stdev = cv.meanStdDev(img)
+# s = stdev[0]**2 / mean[0]
+# return s[0]
def compute_if_blank(im,width=100,sigma=0,thresh_canny=.1,thresh_mean=4,mask=None):
# im is graysacale np
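
Review note: the blur measures above are each a filter plus a statistic, so they do not strictly need cv2. A rough sketch of the two most common ones with scipy.ndimage, which is an assumed extra dependency (not imported by this module); absolute values differ from the cv2 versions, so the "below 100 is usually blurry" rule of thumb would need re-tuning:

import numpy as np
from scipy import ndimage

def variance_of_laplacian(gray):
    # LAPV-style focus measure: higher variance = sharper image
    return float(ndimage.laplace(gray.astype(np.float64)).var())

def tenengrad(gray):
    # TENG-style focus measure: mean squared Sobel gradient magnitude
    gx = ndimage.sobel(gray.astype(np.float64), axis=1)
    gy = ndimage.sobel(gray.astype(np.float64), axis=0)
    return float(np.mean(gx ** 2 + gy ** 2))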
@@ -363,48 +363,48 @@ def print_timing(t,n):
print('Elapsed time: {:.2f}'.format(t))
print('FPS: {:.2f}'.format(n/t))
-def vid2frames(fpath, limit=5000, width=None, idxs=None):
- """Convert a video file into list of frames
- :param fpath: filepath to the video file
- :param limit: maximum number of frames to read
- :param fpath: the indices of frames to keep (rest are skipped)
- :returns: (fps, number of frames, list of Numpy.ndarray frames)
- """
- frames = []
- try:
- cap = cv.VideoCapture(fpath)
- except:
- print('[-] Error. Could not read video file: {}'.format(fpath))
- try:
- cap.release()
- except:
- pass
- return frames
+# def vid2frames(fpath, limit=5000, width=None, idxs=None):
+# """Convert a video file into list of frames
+# :param fpath: filepath to the video file
+# :param limit: maximum number of frames to read
+# :param fpath: the indices of frames to keep (rest are skipped)
+# :returns: (fps, number of frames, list of Numpy.ndarray frames)
+# """
+# frames = []
+# try:
+# cap = cv.VideoCapture(fpath)
+# except:
+# print('[-] Error. Could not read video file: {}'.format(fpath))
+# try:
+# cap.release()
+# except:
+# pass
+# return frames
- fps = cap.get(cv.CAP_PROP_FPS)
- nframes = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
+# fps = cap.get(cv.CAP_PROP_FPS)
+# nframes = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
- if idxs is not None:
- # read sample indices by seeking to frame index
- for idx in idxs:
- cap.set(cv.CAP_PROP_POS_FRAMES, idx)
- res, frame = cap.read()
- if width is not None:
- frame = imutils.resize(frame, width=width)
- frames.append(frame)
- else:
- while(True and len(frames) < limit):
- res, frame = cap.read()
- if not res:
- break
- if width is not None:
- frame = imutils.resize(frame, width=width)
- frames.append(frame)
+# if idxs is not None:
+# # read sample indices by seeking to frame index
+# for idx in idxs:
+# cap.set(cv.CAP_PROP_POS_FRAMES, idx)
+# res, frame = cap.read()
+# if width is not None:
+# frame = imutils.resize(frame, width=width)
+# frames.append(frame)
+# else:
+# while(True and len(frames) < limit):
+# res, frame = cap.read()
+# if not res:
+# break
+# if width is not None:
+# frame = imutils.resize(frame, width=width)
+# frames.append(frame)
- cap.release()
- del cap
- #return fps,nframes,frames
- return frames
+# cap.release()
+# del cap
+# #return fps,nframes,frames
+# return frames
def convolve_filter(vals,filters=[1]):
for k in filters:
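
Review note: if frame extraction is still needed once cv2 is gone, another decoder has to take its place. A hypothetical sketch with imageio (not a dependency of this module, and it needs the ffmpeg plugin for video); it yields RGB frames where cv2.VideoCapture yielded BGR, and the width-resizing step is omitted here because imutils.resize is itself an OpenCV wrapper:

import imageio  # assumed dependency; requires the ffmpeg backend for video files

def vid2frames(fpath, limit=5000, idxs=None):
    """Read a video into a list of RGB numpy frames without cv2."""
    frames = []
    try:
        reader = imageio.get_reader(fpath)
    except Exception:
        print('[-] Error. Could not read video file: {}'.format(fpath))
        return frames
    if idxs is not None:
        # keep only the requested frame indices
        for idx in idxs:
            frames.append(reader.get_data(idx))
    else:
        for i, frame in enumerate(reader):
            if i >= limit:
                break
            frames.append(frame)
    reader.close()
    return frames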