From 448b6ec5ea8f7eee36cc9053e7f124e0ab55d889 Mon Sep 17 00:00:00 2001
From: Cameron
Date: Mon, 20 Mar 2017 00:54:28 -0700
Subject: Update README.md
---
README.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/README.md b/README.md
index 3c2196f..1375571 100644
--- a/README.md
+++ b/README.md
@@ -70,6 +70,7 @@ More than one style image can be used to blend multiple artistic styles.
+
*Top row (left to right)*: [The Starry Night](https://www.wikiart.org/en/vincent-van-gogh/the-starry-night-1889) + [The Scream](https://www.wikiart.org/en/edvard-munch/the-scream-1893), [The Scream](https://www.wikiart.org/en/edvard-munch/the-scream-1893) + [Composition VII](https://www.wikiart.org/en/wassily-kandinsky/composition-vii-1913), [Seated Nude](http://www.pablopicasso.org/seated-nude.jsp) + [Composition VII](https://www.wikiart.org/en/wassily-kandinsky/composition-vii-1913)
*Bottom row (left to right)*: [Seated Nude](http://www.pablopicasso.org/seated-nude.jsp) + [The Starry Night](https://www.wikiart.org/en/vincent-van-gogh/the-starry-night-1889), [Oversoul](http://alexgrey.com/art/paintings/soul/oversoul/) + [Freshness of Cold](https://afremov.com/FRESHNESS-OF-COLD-PALETTE-KNIFE-Oil-Painting-On-Canvas-By-Leonid-Afremov-Size-30-x40.html), [David Bowie](http://www.francoise-nielly.com/index.php/galerie/index/56) + [Skull](https://www.wikiart.org/en/jean-michel-basquiat/head)
@@ -85,6 +86,7 @@ When using multiple style images, the degree of blending between the images can
+
*Top row (left to right)*: content image, .2 [The Starry Night](https://www.wikiart.org/en/vincent-van-gogh/the-starry-night-1889) + .8 [The Scream](https://www.wikiart.org/en/edvard-munch/the-scream-1893), .8 [The Starry Night](https://www.wikiart.org/en/vincent-van-gogh/the-starry-night-1889) + .2 [The Scream](https://www.wikiart.org/en/edvard-munch/the-scream-1893)
*Bottom row (left to right)*: .2 [Oversoul](http://alexgrey.com/art/paintings/soul/oversoul/) + .8 [Freshness of Cold](https://afremov.com/FRESHNESS-OF-COLD-PALETTE-KNIFE-Oil-Painting-On-Canvas-By-Leonid-Afremov-Size-30-x40.html), .5 [Oversoul](http://alexgrey.com/art/paintings/soul/oversoul/) + .5 [Freshness of Cold](https://afremov.com/FRESHNESS-OF-COLD-PALETTE-KNIFE-Oil-Painting-On-Canvas-By-Leonid-Afremov-Size-30-x40.html), .8 [Oversoul](http://alexgrey.com/art/paintings/soul/oversoul/) + .2 [Freshness of Cold](https://afremov.com/FRESHNESS-OF-COLD-PALETTE-KNIFE-Oil-Painting-On-Canvas-By-Leonid-Afremov-Size-30-x40.html)
--
cgit v1.2.3-70-g09d2
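
The second hunk above refers to controlling "the degree of blending" between style images. As a hedged illustration of that idea only — not the repository's actual implementation or flags — blending usually amounts to a weighted sum of per-style Gram-matrix losses, e.g.:

```python
# Hypothetical sketch: blend N style images by weighting their style losses.
# Names here (gram_matrix, blended_style_loss, the weights list) are
# illustrative and are not taken from neural_style.py.
import numpy as np

def gram_matrix(feats):
    # feats: (h*w, d) activations of one CNN layer, flattened over space
    return feats.T @ feats / feats.size

def blended_style_loss(gen_feats, style_feats_list, weights):
    # weights mirror the ".2 + .8" style captions in the README hunk above
    loss = 0.0
    for style_feats, w in zip(style_feats_list, weights):
        loss += w * np.sum((gram_matrix(gen_feats) - gram_matrix(style_feats)) ** 2)
    return loss

# toy usage on random activations, blended 0.2 / 0.8
rng = np.random.default_rng(0)
gen = rng.standard_normal((32 * 32, 64)).astype(np.float32)
styles = [rng.standard_normal((32 * 32, 64)).astype(np.float32) for _ in range(2)]
print(blended_style_loss(gen, styles, [0.2, 0.8]))
```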
From eedd2b85b224cd6810f6941b637eba25acfc9533 Mon Sep 17 00:00:00 2001
From: Cameron
Date: Fri, 24 Mar 2017 11:46:43 -0700
Subject: Update README.md
---
README.md | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/README.md b/README.md
index 1375571..8023941 100644
--- a/README.md
+++ b/README.md
@@ -103,6 +103,7 @@ Here we reproduce Figure 1 and Figure 2 in the third paper using luminance-only
+
*Left to right*: content image, stylized image, stylized image with the original colors of the content image
### Textures
@@ -153,6 +154,7 @@ Multiple styles can be transferred to the foreground and background of the conte
+
*Left to right*: content image, foreground style, background style, foreground mask, background mask, stylized image
### Video
@@ -165,6 +167,7 @@ Animations can be rendered by applying the algorithm to each source frame. For
+
*Top row (left to right)*: source frames, ground-truth optical flow visualized
*Bottom row (left to right)*: disoccluded regions and motion boundaries, stylized frames
@@ -181,6 +184,7 @@ Here we reproduce Figure 6 from the first paper:
+
*Top row (left to right)*: Initialized with the content image, the style image, white noise (RNG seed 1)
*Bottom row (left to right)*: Initialized with white noise (RNG seeds 2, 3, 4)
@@ -232,6 +236,7 @@ Here we reproduce Figure 3 from [the original paper](https://arxiv.org/abs/1508.
 |
+
*Rows*: increasing subsets of CNN layers; e.g. 'conv4_1' means using 'conv1_1', 'conv2_1', 'conv3_1', 'conv4_1'.
*Columns*: alpha/beta ratio of the content and style reconstruction (see Content / Style Tradeoff).
--
cgit v1.2.3-70-g09d2
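
The first hunk of this patch documents the luminance-only figures ("content image, stylized image, stylized image with the original colors of the content image"). The mechanism behind the third panel appears later in this series as `convert_to_original_colors`; a minimal standalone sketch of the same YUV swap, with placeholder file names, might look like:

```python
# Hedged sketch of color preservation: keep the stylized luminance (Y) and
# reuse the content image's chrominance (U, V). File names are placeholders.
import cv2

content = cv2.imread('content.png', cv2.IMREAD_COLOR)
stylized = cv2.imread('stylized.png', cv2.IMREAD_COLOR)
stylized = cv2.resize(stylized, (content.shape[1], content.shape[0]))

y, _, _ = cv2.split(cv2.cvtColor(stylized, cv2.COLOR_BGR2YUV))   # stylized luminance
_, u, v = cv2.split(cv2.cvtColor(content, cv2.COLOR_BGR2YUV))    # original colors
result = cv2.cvtColor(cv2.merge((y, u, v)), cv2.COLOR_YUV2BGR)
cv2.imwrite('stylized_original_colors.png', result)
```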
From 18b9314f8c45edcb8a78e33ea8b4acbdeee63908 Mon Sep 17 00:00:00 2001
From: Cameron
Date: Tue, 28 Mar 2017 17:44:17 -0700
Subject: Update README.md
---
README.md | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 8023941..5b58f81 100644
--- a/README.md
+++ b/README.md
@@ -356,12 +356,15 @@ python neural_style.py --video \
#### Optimization Arguments
* `--optimizer`: Loss minimization optimizer. L-BFGS gives better results. Adam uses less memory. *Choices*: `lbfgs`, `adam`. *Default*: `lbfgs`
* `--learning_rate`: Learning-rate parameter for the Adam optimizer. *Default*: `1e0`
+
+
* `--max_iterations`: Max number of iterations for the Adam or L-BFGS optimizer. *Default*: `1000`
* `--print_iterations`: Number of iterations between optimizer print statements. *Default*: `50`
-* `--content_loss_function`: Different constants K in the content loss function. *Choices*: `1`, `2`, `3`. *Default*: `1`
+* `--content_loss_function`: Different constants K in the content loss function. *Choices*: `1`, `2`, `3`. *Default*: `1`
+
--
cgit v1.2.3-70-g09d2
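
All of the options touched by this patch are plain command-line flags. As a usage sketch with the documented defaults spelled out (the content/style image arguments are elided here because they are not part of this hunk), an Adam run might look like:

```bash
python neural_style.py <content/style image arguments> \
  --optimizer adam \
  --learning_rate 1e0 \
  --max_iterations 1000 \
  --print_iterations 50 \
  --content_loss_function 1
```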
From 2901b7aca6ae43847d6b329609f1ee55a1ce6f3f Mon Sep 17 00:00:00 2001
From: Cameron
Date: Sun, 9 Apr 2017 16:44:08 -0700
Subject: Simplified pre/post-processing
---
neural_style.py | 30 ++++++++++++++----------------
1 file changed, 14 insertions(+), 16 deletions(-)
diff --git a/neural_style.py b/neural_style.py
index 13ba513..b717216 100644
--- a/neural_style.py
+++ b/neural_style.py
@@ -230,9 +230,8 @@ def parse_args():
pre-trained vgg19 convolutional neural network
remark: layers are manually initialized for clarity.
'''
-vgg19_mean = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
-def build_vgg19(input_img):
+def build_model(input_img):
if args.verbose: print('\nBUILDING VGG-19 NETWORK')
net = {}
_, h, w, d = input_img.shape
@@ -470,7 +469,6 @@ def sum_shortterm_temporal_losses(sess, net, frame, input_img):
'''
denoising loss function
- remark: not sure this does anything significant.
'''
def sum_total_variation_losses(sess, net, input_img):
b, h, w, d = input_img.shape
@@ -493,23 +491,23 @@ def read_image(path):
img = cv2.imread(path, cv2.IMREAD_COLOR)
check_image(img, path)
img = img.astype(np.float32)
- img = preprocess(img, vgg19_mean)
+ img = preprocess(img)
return img
def write_image(path, img):
- img = postprocess(img, vgg19_mean)
+ img = postprocess(img)
cv2.imwrite(path, img)
-def preprocess(img, mean):
+def preprocess(img):
# bgr to rgb
img = img[...,::-1]
# shape (h, w, d) to (1, h, w, d)
img = img[np.newaxis,:,:,:]
- img -= mean
+ img -= np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
return img
-def postprocess(img, mean):
- img += mean
+def postprocess(img):
+ img += np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
# shape (1, h, w, d) to (h, w, d)
img = img[0]
img = np.clip(img, 0, 255).astype('uint8')
@@ -565,7 +563,7 @@ def check_image(img, path):
def stylize(content_img, style_imgs, init_img, frame=None):
with tf.device(args.device), tf.Session() as sess:
# setup network
- net = build_vgg19(content_img)
+ net = build_model(content_img)
# style loss
if args.style_mask:
@@ -731,7 +729,7 @@ def get_content_image(content_img):
if w > mx:
h = (float(mx) / float(w)) * h
img = cv2.resize(img, dsize=(mx, int(h)), interpolation=cv2.INTER_AREA)
- img = preprocess(img, vgg19_mean)
+ img = preprocess(img)
return img
def get_style_images(content_img):
@@ -744,7 +742,7 @@ def get_style_images(content_img):
check_image(img, path)
img = img.astype(np.float32)
img = cv2.resize(img, dsize=(cw, ch), interpolation=cv2.INTER_AREA)
- img = preprocess(img, vgg19_mean)
+ img = preprocess(img)
style_imgs.append(img)
return style_imgs
@@ -781,7 +779,7 @@ def get_prev_warped_frame(frame):
path = os.path.join(args.video_input_dir, fn)
flow = read_flow_file(path)
warped_img = warp_image(prev_img, flow).astype(np.float32)
- img = preprocess(warped_img, vgg19_mean)
+ img = preprocess(warped_img)
return img
def get_content_weights(frame, prev_frame):
@@ -807,8 +805,8 @@ def warp_image(src, flow):
return dst
def convert_to_original_colors(content_img, stylized_img):
- content_img = postprocess(content_img, vgg19_mean)
- stylized_img = postprocess(stylized_img, vgg19_mean)
+ content_img = postprocess(content_img)
+ stylized_img = postprocess(stylized_img)
if args.color_convert_type == 'yuv':
cvt_type = cv2.COLOR_BGR2YUV
inv_cvt_type = cv2.COLOR_YUV2BGR
@@ -827,7 +825,7 @@ def convert_to_original_colors(content_img, stylized_img):
_, c2, c3 = cv2.split(content_cvt)
merged = cv2.merge((c1, c2, c3))
dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
- dst = preprocess(dst, vgg19_mean)
+ dst = preprocess(dst)
return dst
def render_single_image():
--
cgit v1.2.3-70-g09d2
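
For reference, here is the simplified pair from this patch as a self-contained sketch (NumPy only), with an illustrative round trip appended. The trailing rgb-to-bgr flip in `postprocess` is not visible in the hunk above and is included here only so the toy round trip is complete:

```python
import numpy as np

def preprocess(img):
    img = img[..., ::-1]              # bgr to rgb
    img = img[np.newaxis, :, :, :]    # shape (h, w, d) to (1, h, w, d)
    img -= np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))
    return img

def postprocess(img):
    img += np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))
    img = img[0]                      # shape (1, h, w, d) to (h, w, d)
    img = np.clip(img, 0, 255).astype('uint8')
    img = img[..., ::-1]              # rgb to bgr (assumed; not shown in the hunk)
    return img

# toy round trip on a random float32 "image"
img = np.random.default_rng(0).uniform(0, 255, size=(4, 4, 3)).astype(np.float32)
out = postprocess(preprocess(img.copy()))
print(out.shape, out.dtype)           # (4, 4, 3) uint8
```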