| author | jules@lens <julescarbon@gmail.com> | 2018-09-17 22:55:37 +0200 |
|---|---|---|
| committer | jules@lens <julescarbon@gmail.com> | 2018-09-17 22:55:37 +0200 |
| commit | 002251cccf6afa3359b462726d226eaa2de6c7cb (patch) | |
| tree | 3fbbfb05ac2841c7d307c64aa9dafd15ade74a4a | |
| parent | 79bc4d248b97ccc70e5170f964a2cf585a2d9311 (diff) | |
why is this train script so weird
| -rwxr-xr-x | train.py | 9 |
1 file changed, 4 insertions, 5 deletions
```diff
@@ -111,11 +111,10 @@ for epoch in range(start_epoch, start_epoch + opt.niter):
           (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
 
     ### save model for this epoch
-    if epoch % opt.save_epoch_freq == 0:
-        print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
-        model.module.save('latest')
-        model.module.save(epoch)
-        np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
+    print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
+    model.module.save('latest')
+    model.module.save(epoch)
+    np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
 
     ### instead of only training the local enhancer, train the entire network after certain iterations
     # if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
```
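The change drops the `opt.save_epoch_freq` gate, so a checkpoint is written at the end of every epoch rather than every few epochs. Below is a minimal, self-contained sketch of that end-of-epoch save path, reusing the names visible in the diff (`opt.niter`, `opt.niter_decay`, `model.module.save`, `iter_path`, `total_steps`); the model, the option values, and the save implementation are stand-ins, not the repository's actual code.

```python
import time
from types import SimpleNamespace

import numpy as np
import torch
import torch.nn as nn

opt = SimpleNamespace(niter=2, niter_decay=1)  # illustrative option values
iter_path = 'iter.txt'                         # stores (next epoch, iter count) for resuming

model = nn.DataParallel(nn.Linear(4, 1))       # DataParallel provides the .module attribute

def save_checkpoint(label):
    """Stand-in for the model's own save(); pix2pixHD-style code writes .pth files per label."""
    torch.save(model.module.state_dict(), 'net_%s.pth' % label)

model.module.save = save_checkpoint            # attached so the loop below mirrors the diff

start_epoch, total_steps = 1, 0
for epoch in range(start_epoch, start_epoch + opt.niter + opt.niter_decay):
    epoch_start_time = time.time()
    total_steps += 100  # pretend 100 training iterations ran this epoch

    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))

    ### save model for this epoch
    # Before this commit the block below sat under
    #   if epoch % opt.save_epoch_freq == 0:
    # The commit removes that gate, so every epoch is checkpointed.
    print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
    model.module.save('latest')  # rolling checkpoint, overwritten each epoch
    model.module.save(epoch)     # per-epoch snapshot kept on disk
    np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')  # resume marker
```

The tradeoff: `model.module.save(epoch)` now writes a full snapshot every epoch, spending disk space to gain finer-grained resume points.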
