diff options
| author | StevenLiuWen <liuwen@shanghaitech.edu.cn> | 2018-03-13 03:28:06 -0400 |
|---|---|---|
| committer | StevenLiuWen <liuwen@shanghaitech.edu.cn> | 2018-03-13 03:28:06 -0400 |
| commit | fede6ca1dd0077ff509d84bd24028cc7a93bb119 (patch) | |
| tree | af7f6e759b5dec4fc2964daed09e903958b919ed /Codes/training_hyper_params/hyper_params.ini | |
first commit
Diffstat (limited to 'Codes/training_hyper_params/hyper_params.ini')
| -rw-r--r-- | Codes/training_hyper_params/hyper_params.ini | 103 |
1 files changed, 103 insertions, 0 deletions
diff --git a/Codes/training_hyper_params/hyper_params.ini b/Codes/training_hyper_params/hyper_params.ini new file mode 100644 index 0000000..99dbf00 --- /dev/null +++ b/Codes/training_hyper_params/hyper_params.ini @@ -0,0 +1,103 @@ +[ped2] +# for lp loss. e.g., 1 or 2 for l1 and l2 loss, respectively +L_NUM = 2 +# the power to which each gradient term is raised in GDL loss +ALPHA_NUM = 1 +# the percentage of the adversarial loss to use in the combined loss +LAM_ADV = 0.05 +# the percentage of the lp loss to use in the combined loss +LAM_LP = 1 +# the percentage of the GDL loss to use in the combined loss +LAM_GDL = 1 +# the percentage of the different frame loss +LAM_FLOW = 2 + +LRATE_G = [0.0001, 0.00001] +LRATE_G_BOUNDARIES = [7000] + +LRATE_D = [0.00001, 0.000001] +LRATE_D_BOUNDARIES = [7000] + +[ped1] +# for lp loss. e.g., 1 or 2 for l1 and l2 loss, respectively +L_NUM = 2 +# the power to which each gradient term is raised in GDL loss +ALPHA_NUM = 1 +# the percentage of the adversarial loss to use in the combined loss +LAM_ADV = 0.05 +# the percentage of the lp loss to use in the combined loss +LAM_LP = 1 +# the percentage of the GDL loss to use in the combined loss +LAM_GDL = 1 +# the percentage of the different frame loss +LAM_FLOW = 0.01 + +LRATE_G = [0.0001, 0.00001] +LRATE_G_BOUNDARIES = [40000] + +LRATE_D = [0.00001, 0.000001] +LRATE_D_BOUNDARIES = [40000] + + +[avenue] +# for lp loss. e.g., 1 or 2 for l1 and l2 loss, respectively +L_NUM = 2 +# the power to which each gradient term is raised in GDL loss +ALPHA_NUM = 1 +# the percentage of the adversarial loss to use in the combined loss +LAM_ADV = 0.05 +# the percentage of the lp loss to use in the combined loss, +# we found a smaller lp is slightly better in avenue, but there is not too much difference. 
+LAM_LP = 0 +# the percentage of the GDL loss to use in the combined loss +LAM_GDL = 1 +# the percentage of the different frame loss +LAM_FLOW = 2 + +LRATE_G = [0.0002, 0.00002] +LRATE_G_BOUNDARIES = [100000] + +LRATE_D = [0.00002, 0.000002] +LRATE_D_BOUNDARIES = [100000] + + +[shanghaitech] +# for lp loss. e.g., 1 or 2 for l1 and l2 loss, respectively +L_NUM = 2 +# the power to which each gradient term is raised in GDL loss +ALPHA_NUM = 1 +# the percentage of the adversarial loss to use in the combined loss +LAM_ADV = 0.05 +# the percentage of the lp loss to use in the combined loss +LAM_LP = 1 +# the percentage of the GDL loss to use in the combined loss +LAM_GDL = 1 +# the percentage of the different frame loss +LAM_FLOW = 2 + +LRATE_G = [0.0002, 0.00002] +LRATE_G_BOUNDARIES = [50000] + +LRATE_D = [0.00002, 0.000002] +LRATE_D_BOUNDARIES = [50000] + + +[toydata] +# for lp loss. e.g., 1 or 2 for l1 and l2 loss, respectively +L_NUM = 2 +# the power to which each gradient term is raised in GDL loss +ALPHA_NUM = 1 +# the percentage of the adversarial loss to use in the combined loss +LAM_ADV = 0.05 +# the percentage of the lp loss to use in the combined loss +LAM_LP = 1 +# the percentage of the GDL loss to use in the combined loss +LAM_GDL = 1 +# the percentage of the different frame loss +LAM_FLOW = 2 + +LRATE_G = [0.0001, 0.00001] +LRATE_G_BOUNDARIES = [7000] + +LRATE_D = [0.00001, 0.000001] +LRATE_D_BOUNDARIES = [7000] |
