author     Hiroshiba Kazuyuki <kazuyuki_hiroshiba@dwango.co.jp>  2018-02-02 16:59:33 +0900
committer  Hiroshiba Kazuyuki <kazuyuki_hiroshiba@dwango.co.jp>  2018-02-10 20:52:52 +0900
commit     0b410c0bbacc147950438423835547b74859aac1 (patch)
tree       a5f8dc39e4d980bcfb09b21f47bf715d860c2e45 /become_yukarin/acoustic_converter.py
parent     4ff5252dbdc0cdaeecc7fbe399c629e4d29de3a3 (diff)
pix2pix conversion model
Diffstat (limited to 'become_yukarin/acoustic_converter.py')
-rw-r--r--  become_yukarin/acoustic_converter.py  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/become_yukarin/acoustic_converter.py b/become_yukarin/acoustic_converter.py
index 498bdb1..62eacff 100644
--- a/become_yukarin/acoustic_converter.py
+++ b/become_yukarin/acoustic_converter.py
@@ -73,6 +73,9 @@ class AcousticConverter(object):
         input = self._feature_normalize(input, test=True)
         input = self._encode_feature(input, test=True)
+        pad = 128 - input.shape[1] % 128
+        input = numpy.pad(input, [(0, 0), (0, pad)], mode='minimum')
+
         converter = partial(chainer.dataset.convert.concat_examples, device=self.gpu, padding=0)
         inputs = converter([input])
@@ -81,6 +84,7 @@ class AcousticConverter(object):
         if self.gpu is not None:
             out = chainer.cuda.to_cpu(out)
+        out = out[:, :-pad]
         out = self._decode_feature(out, test=True)
         out = AcousticFeature(
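
The change pads the encoded feature along its time axis to a multiple of 128 frames before it is fed to the pix2pix-style converter, then trims the same amount off the network output. Below is a minimal, self-contained sketch of that pad-and-trim pattern; the helper name pad_to_multiple, the array shape, and the run_model placeholder are illustrative assumptions, not part of this repository.

    import numpy

    def pad_to_multiple(feature, block=128):
        """Pad axis 1 so its length is a multiple of `block`.

        Returns the padded array and the padding amount, so the caller
        can trim the model output back to the original length.
        (Hypothetical helper; mirrors the logic added in this commit.)
        """
        pad = block - feature.shape[1] % block  # 1..block columns of padding
        padded = numpy.pad(feature, [(0, 0), (0, pad)], mode='minimum')
        return padded, pad

    # Hypothetical usage mirroring AcousticConverter:
    feature = numpy.random.rand(40, 300).astype(numpy.float32)  # (feature dim, frames)
    padded, pad = pad_to_multiple(feature)
    # out = run_model(padded)   # placeholder for the pix2pix converter
    out = padded                # stand-in so the sketch runs on its own
    out = out[:, :-pad]         # drop the padded frames again
    assert out.shape == feature.shape

Note that 128 - length % 128 yields a full extra block of 128 when the length is already divisible by 128; that is harmless here because exactly the same number of frames is sliced off the output.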