From fa0a2624d94b91c3b1ddf6daf9da7ac0a655cf66 Mon Sep 17 00:00:00 2001
From: taesung89
Date: Wed, 19 Jul 2017 17:15:07 -0700
Subject: Update README.md

removed use_dropout option because it is on by default
---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 2f3d136..d01b13b 100644
--- a/README.md
+++ b/README.md
@@ -87,7 +87,7 @@ bash ./datasets/download_pix2pix_dataset.sh facades
 - Train a model:
 ```bash
 #!./scripts/train_pix2pix.sh
-python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --which_model_netG unet_256 --which_direction BtoA --lambda_A 100 --dataset_mode aligned --use_dropout --no_lsgan --norm batch
+python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --which_model_netG unet_256 --which_direction BtoA --lambda_A 100 --dataset_mode aligned --no_lsgan --norm batch
 ```
 - To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097. To see more intermediate results, check out `./checkpoints/facades_pix2pix/web/index.html`
 - Test the model (`bash ./scripts/test_pix2pix.sh`):
@@ -103,7 +103,7 @@ More example scripts can be found at `scripts` directory.
 If you would like to apply a pre-trained model to a collection of input photos (without image pairs), please use `--dataset_mode single` and `--model test` options. Here is a script to apply a pix2pix model to facade label maps (stored in the directory `facades/testB`).
 ``` bash
 #!./scripts/test_single.sh
-python test.py --dataroot ./datasets/facades/testB/ --name facades_pix2pix --model test --which_model_netG unet_256 --which_direction BtoA --dataset_mode single --use_dropout
+python test.py --dataroot ./datasets/facades/testB/ --name facades_pix2pix --model test --which_model_netG unet_256 --which_direction BtoA --dataset_mode single
 ```
 
 ## Training/test Details
-- 
cgit v1.2.3-70-g09d2