author    Jules Laplace <julescarbon@gmail.com>  2018-07-23 15:48:14 +0200
committer Jules Laplace <julescarbon@gmail.com>  2018-07-23 15:48:14 +0200
commit    6c34feb53b18c45829ff001edca2b6fa306c1429 (patch)
tree      6b630677ccea310293853b97f103309134d561ca
parent    f12508d660175f6a0870fc118a48589a3435abe9 (diff)
superres
-rw-r--r--  NOTES                                  25
-rw-r--r--  become_yukarin/acoustic_converter.py    7
2 files changed, 22 insertions, 10 deletions
diff --git a/NOTES b/NOTES
index 7aed73c..762df82 100644
--- a/NOTES
+++ b/NOTES
@@ -30,8 +30,8 @@ parser.add_argument('--enable_overwrite', action='store_true')
# TRAIN VOICE CONVERSION NETWORKS
/home/spawn/.virtualenv/yukarin/bin/python train.py \
- ./20180719133105_2_holly_2_mat.json \
- ./data/mat-holly-24000/conversion-net/pp-el8-holly-2-mat
+ ./20180719133105.3.mat_2_holly.json \
+ ./data/mat-holly-24000/conversion-net/pp-el0-holly-2-mat
# OPTIONAL: TRAIN ON MULTIPLE GPUS USING RECIPE.JSON (RUNS MULTIPLE SCREENS)
@@ -106,11 +106,20 @@ parser.add_argument('-g', '--gpu', type=int)
+mkdir all
+for i in *.wav
+do
+  dir=${i%.*}
+  ../br_misc/split.sh "$i" 12 "$dir"
+  cd "$dir"
+  for j in *.wav
+  do
+    mv "$j" "${dir}_${j}"
+  done
+  cd ..
+  mv "$dir"/* all
+done
-
-
-
-
-
-
+cd output/net/
+mkdir concat
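
The hunk above splits each source wav into chunks, prefixes the chunk names with the utterance name, and gathers everything under all/; it then creates output/net/concat, but the actual join command is not part of this diff. Purely as a hypothetical illustration of that last step (chunk naming, paths, and the example utterance name are assumptions, not taken from the notes), the converted chunks for one utterance could be stitched back together with Python's standard wave module:

# hypothetical concat step -- not in the notes; assumes converted chunks named
# like <utterance>_*.wav in output/net/, sharing rate/width/channel count, and
# that the names sort in playback order (e.g. zero-padded indices)
import glob
import wave

chunks = sorted(glob.glob('output/net/holly_001_*.wav'))   # assumed naming

with wave.open('output/net/concat/holly_001.wav', 'wb') as out:
    for k, path in enumerate(chunks):
        with wave.open(path, 'rb') as part:
            if k == 0:
                out.setparams(part.getparams())   # copy rate, width, channels
            out.writeframes(part.readframes(part.getnframes()))
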
diff --git a/become_yukarin/acoustic_converter.py b/become_yukarin/acoustic_converter.py
index 62eacff..706c6ac 100644
--- a/become_yukarin/acoustic_converter.py
+++ b/become_yukarin/acoustic_converter.py
@@ -122,17 +122,20 @@ class AcousticConverter(object):
    def convert_from_audio_path(self, path: Path, out_sampling_rate: Optional[int] = None):
        wave = self._wave_process(str(path), test=True)
        feature = self._feature_process(wave, test=True)
-        return self.convert_from_feature(feature, out_sampling_rate)
+        f0 = feature.f0
+        return self.convert_from_feature(feature, out_sampling_rate, f0)

    def convert_from_feature_path(self, path: Path, out_sampling_rate: Optional[int] = None):
        feature = self._acoustic_feature_load_process(path, test=True)
        return self.convert_from_feature(feature, out_sampling_rate)

-    def convert_from_feature(self, input: AcousticFeature, out_sampling_rate: Optional[int] = None):
+    def convert_from_feature(self, input: AcousticFeature, out_sampling_rate: Optional[int] = None, f0=None):
        if out_sampling_rate is None:
            out_sampling_rate = self.config.dataset.param.voice_param.sample_rate
        out = self.convert_to_feature(input=input, out_sampling_rate=out_sampling_rate)
+        if f0 is not None:
+            out.f0 = f0
        out = pyworld.synthesize(
            f0=out.f0.ravel(),
            spectrogram=out.spectrogram,
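
The change above extracts the source f0 in convert_from_audio_path and writes it back onto the converted feature just before synthesis, so the rendered audio keeps the input's pitch contour while the spectral features come from the conversion network. As a self-contained illustration of that one idea (this is not become_yukarin code: the file names are placeholders and the "converted" spectrogram is just a copy of the source one), the same f0-reuse step with pyworld directly:

# illustrative sketch only -- reuse the source f0 when synthesizing a
# converted spectral envelope, mirroring the `out.f0 = f0` step above
import numpy
import pyworld
from scipy.io import wavfile

fs, data = wavfile.read('source.wav')            # hypothetical mono input
x = data.astype(numpy.float64)

# WORLD analysis: source f0, spectral envelope, aperiodicity
f0, sp, ap = pyworld.wav2world(x, fs)

# Stand-in for the network's converted spectrogram; in the real pipeline this
# would come from convert_to_feature()
converted_sp = sp.copy()

# Synthesize with the converted spectrogram but the *source* f0
y = pyworld.synthesize(f0.ravel(), converted_sp, ap, fs)
wavfile.write('converted.wav', fs,
              (y / max(1.0, numpy.abs(y).max()) * 32767).astype(numpy.int16))
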