| author | Boris Fomitchev <bfomitchev@nvidia.com> | 2018-05-08 00:56:35 -0700 |
|---|---|---|
| committer | Boris Fomitchev <bfomitchev@nvidia.com> | 2018-05-08 00:56:35 -0700 |
| commit | 4ca6b1610f9fa65f8bd7d7c15059bfde18a2f02a (patch) | |
| tree | ec2eeb09cdef6a70ea5612c3e6aa91ed2849414a /models/base_model.py | |
| parent | 736a2dc9afef418820e9c52f4f3b38460360b9f2 (diff) | |
Added data size and ONNX export options, FP16 inference is working
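The commit message mentions ONNX export and FP16 inference, although neither appears in the hunks below (the diffstat is limited to models/base_model.py). As a rough illustration of what those two pieces typically look like in PyTorch, here is a minimal sketch; the tiny Conv2d stands in for the repo's generator, and the output filename is hypothetical.

```python
import torch
import torch.nn as nn

# Stand-in for the repo's generator network (assumption); requires a CUDA device.
netG = nn.Conv2d(3, 3, kernel_size=3, padding=1).cuda().eval()
dummy = torch.randn(1, 3, 256, 256, device='cuda')

# ONNX export with a fixed-size dummy input (hypothetical output path).
torch.onnx.export(netG, dummy, 'generator.onnx')

# FP16 inference: cast both the weights and the input to half precision.
with torch.no_grad():
    out = netG.half()(dummy.half())
print(out.dtype)  # torch.float16
```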
Diffstat (limited to 'models/base_model.py')
| -rwxr-xr-x | models/base_model.py | 8 |
1 file changed, 5 insertions, 3 deletions
diff --git a/models/base_model.py b/models/base_model.py
index 88e0587..2cda12f 100755
--- a/models/base_model.py
+++ b/models/base_model.py
@@ -68,7 +68,8 @@ class BaseModel(torch.nn.Module):
                 try:
                     pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
                     network.load_state_dict(pretrained_dict)
-                    print('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label)
+                    if self.opt.verbose:
+                        print('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label)
                 except:
                     print('Pretrained network %s has fewer layers; The following are not initialized:' % network_label)
                     if sys.version_info >= (3,0):
@@ -82,8 +83,9 @@ class BaseModel(torch.nn.Module):
 
                     for k, v in model_dict.items():
                         if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
-                            not_initialized.add(k.split('.')[0])
-                    print(sorted(not_initialized))
+                            not_initialized.add(k.split('.')[0])
+                    if self.opt.verbose:
+                        print(sorted(not_initialized))
                     network.load_state_dict(model_dict)
 
     def update_learning_rate():
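For context, a minimal standalone sketch of the pattern these hunks touch: loading a checkpoint whose keys only partially match the current model by filtering against its state dict, with diagnostics gated behind a verbose flag as in the diff. The function name and signature are illustrative, not the repo's exact `load_network` method.

```python
import torch

def load_partial_state(network, save_path, verbose=False):
    """Load a checkpoint that may have extra or missing layers."""
    pretrained_dict = torch.load(save_path)
    model_dict = network.state_dict()

    # Keep only keys that exist in the current model and match in shape.
    filtered = {k: v for k, v in pretrained_dict.items()
                if k in model_dict and v.size() == model_dict[k].size()}
    model_dict.update(filtered)
    network.load_state_dict(model_dict)

    # Diagnostics are printed only when verbose is set, as in the change above.
    if verbose:
        skipped = sorted({k.split('.')[0] for k in model_dict if k not in filtered})
        print('Layers not initialized from the checkpoint:', skipped)
```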
