| field | value |
|---|---|
| author | cam <cameron@ideum.com>, 2016-11-06 22:43:30 -0700 |
| committer | cam <cameron@ideum.com>, 2016-11-06 22:43:30 -0700 |
| commit | 9c9ac0121ad8ebf1a7bb1551520096efcbdfbb4a |
| tree | 08c543f941c7e7b70c4270f1957c326fcd00aabe |
| parent | 54a5af4d6550df66306a83555dac830d10edb17c |
| parent | 03e7c07720fb6a1ac166016fd1c33407fafee300 |
Fixed commit.
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | README.md | 5 |
| -rw-r--r-- | neural_style.py | 5 |

2 files changed, 6 insertions, 4 deletions
```diff
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -339,7 +339,10 @@ python neural_style.py --video \
 #### Optimization Arguments
 * `--optimizer`: Loss minimization optimizer. L-BFGS gives better results. Adam uses less memory. *Choices*: `lbfgs`, `adam`. *Default*: `lbfgs`
-* `--learning_rate`: Learning-rate parameter for the Adam optimizer. *Default*: `1e1`
+* `--learning_rate`: Learning-rate parameter for the Adam optimizer. *Default*: `1e0`
+<p align="center">
+<img src="examples/equations/plot.png" width="321px">
+</p>
 * `--max_iterations`: Max number of iterations for the Adam or L-BFGS optimizer. *Default*: `1000`
 * `--print_iterations`: Number of iterations between optimizer print statements. *Default*: `50`
 * `--content_loss_function`: Different constants K in the content loss function. *Choices*: `1`, `2`, `3`. *Default*: `1`
diff --git a/neural_style.py b/neural_style.py
index 7e6e20f..e199678 100644
--- a/neural_style.py
+++ b/neural_style.py
@@ -81,8 +81,7 @@ def parse_args():
     default=['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1'],
     help='VGG19 layers used for the style image. (default: %(default)s)')
 
-  parser.add_argument('--content_layer_weights', nargs='+',
-    type=float,
+  parser.add_argument('--content_layer_weights', nargs='+', type=float,
     default=[1.0],
     help='Contributions (weights) of each content layer to loss. (default: %(default)s)')
 
@@ -143,7 +142,7 @@ def parse_args():
     help='Loss minimization optimizer. L-BFGS gives better results. Adam uses less memory. (default|recommended: %(default)s)')
 
   parser.add_argument('--learning_rate', type=float,
-    default=1e1,
+    default=1e0,
     help='Learning rate parameter for the Adam optimizer. (default: %(default)s)')
 
   parser.add_argument('--max_iterations', type=int,
```
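The substantive change is the Adam default step size dropping from `1e1` to `1e0`. As a rough illustration of where the parsed `--learning_rate` value ends up, here is a minimal, hypothetical sketch (not the repo's actual training loop) using the TF 1.x API that was current in 2016; the variable and loss are stand-ins for the stylized image and the content/style loss:

```python
# Hypothetical sketch, not the repo's wiring: how the parsed
# --learning_rate default feeds Adam under the TF 1.x API.
import tensorflow as tf

x = tf.Variable(10.0)       # stand-in for the stylized image tensor
loss = tf.square(x - 3.0)   # stand-in for the content + style loss

# This commit lowers the default step size from 1e1 to 1e0.
train_op = tf.train.AdamOptimizer(learning_rate=1e0).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):            # mirrors --max_iterations
        sess.run(train_op)
        if i % 50 == 0:              # mirrors --print_iterations
            print(i, sess.run(loss))
```

With Adam, the learning rate roughly bounds the per-parameter step magnitude, so `1e1` can overshoot on pixel-scale updates; `1e0` is the more conservative default this commit adopts.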
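The `neural_style.py` change to `--content_layer_weights` is purely cosmetic, but the pattern it reflows is worth noting: `nargs='+'` with `type=float` parses one or more space-separated values into a list of floats. A self-contained stdlib sketch:

```python
# Standalone sketch of the argparse pattern from the diff:
# nargs='+' collects one or more values; type=float converts each one.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--content_layer_weights', nargs='+', type=float,
    default=[1.0],
    help='Contributions (weights) of each content layer to loss. (default: %(default)s)')

args = parser.parse_args(['--content_layer_weights', '0.5', '0.5'])
print(args.content_layer_weights)  # [0.5, 0.5]
```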
