author     Hiroshiba Kazuyuki <kazuyuki_hiroshiba@dwango.co.jp>  2018-01-15 22:18:02 +0900
committer  Hiroshiba Kazuyuki <kazuyuki_hiroshiba@dwango.co.jp>  2018-01-15 22:18:02 +0900
commit     83608c12e7bb28df1966cbe5b9d86a8e23175044 (patch)
tree       6fc24caaa01d447bf9819bf6c45b3e2d33685579 /become_yukarin/super_resolution.py
parent     c0f3eacabde5d41992a5ae1d8d8f0f170f6b155e (diff)
Enable super-resolution
Diffstat (limited to 'become_yukarin/super_resolution.py')
-rw-r--r--  become_yukarin/super_resolution.py  16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/become_yukarin/super_resolution.py b/become_yukarin/super_resolution.py
index 535af57..bdb2e61 100644
--- a/become_yukarin/super_resolution.py
+++ b/become_yukarin/super_resolution.py
@@ -1,6 +1,5 @@
 from functools import partial
 from pathlib import Path
-from typing import Optional
 
 import chainer
 import numpy
@@ -39,21 +38,28 @@ class SuperResolution(object):
 
     def convert(self, input: numpy.ndarray) -> numpy.ndarray:
         converter = partial(chainer.dataset.convert.concat_examples, padding=0)
-        inputs = converter([numpy.log(input)[:, :-1]])
+        pad = 128 - len(input) % 128
+        input = numpy.pad(input, [(0, pad), (0, 0)], mode='minimum')
+        input = numpy.log(input)[:, :-1]
+        input = input[numpy.newaxis]
+        inputs = converter([input])
 
         with chainer.using_config('train', False):
             out = self.model(inputs).data[0]
 
         out = out[0]
-        out[:, out.shape[1]] = out[:, -1]
+        out = numpy.pad(out, [(0, 0), (0, 1)], mode='edge')
+        out = numpy.exp(out)
+        out = out[:-pad]
         return out
 
     def convert_to_audio(
             self,
             input: numpy.ndarray,
             acoustic_feature: AcousticFeature,
-            sampling_rate: Optional[int] = None,
+            sampling_rate: int,
     ):
+        acoustic_feature = acoustic_feature.astype_only_float(numpy.float64)
         out = pyworld.synthesize(
             f0=acoustic_feature.f0.ravel(),
             spectrogram=input.astype(numpy.float64),
@@ -76,7 +82,7 @@ class SuperResolution(object):
             self,
             input: numpy.ndarray,
             acoustic_feature: AcousticFeature,
-            sampling_rate: Optional[int] = None,
+            sampling_rate: int,
     ):
         high = self.convert(input)
         return self.convert_to_audio(high, acoustic_feature=acoustic_feature, sampling_rate=sampling_rate)
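
Note (not part of the commit): the core of the change is the pre/post-processing that convert() now wraps around the network call: pad the time axis up to a multiple of 128 frames, work on the log-spectrogram with the last frequency bin dropped, then restore that bin by edge padding, exponentiate, and trim the padded frames. The following is a minimal standalone sketch of that round trip, assuming a spectrogram of shape (frames, bins) with positive values; convert_with_padding and the identity lambda are hypothetical stand-ins for the Chainer model that the real code runs under chainer.using_config('train', False).

import numpy

def convert_with_padding(spectrogram: numpy.ndarray, model) -> numpy.ndarray:
    # Pad the time axis so the frame count becomes a multiple of 128,
    # filling with the array minimum (numpy.pad mode='minimum').
    pad = 128 - len(spectrogram) % 128
    x = numpy.pad(spectrogram, [(0, pad), (0, 0)], mode='minimum')

    # Work on the log-spectrogram with the last frequency bin dropped,
    # mirroring numpy.log(input)[:, :-1] in convert().
    x = numpy.log(x)[:, :-1]

    # Add a batch axis, run the (stand-in) model, take the first sample.
    y = model(x[numpy.newaxis])[0]

    # Restore the dropped bin by repeating the last column (mode='edge'),
    # go back to the linear domain, and trim the padded frames.
    y = numpy.pad(y, [(0, 0), (0, 1)], mode='edge')
    y = numpy.exp(y)
    return y[:-pad]

# With an identity "model" the round trip keeps the original frame and bin
# counts (up to the duplicated last bin).
spec = numpy.abs(numpy.random.randn(300, 513)) + 1e-6
print(convert_with_padding(spec, model=lambda batch: batch).shape)  # (300, 513)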
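The astype_only_float(numpy.float64) cast added in convert_to_audio(), together with input.astype(numpy.float64), is presumably there because pyworld operates on float64 arrays. A rough usage sketch of pyworld.synthesize with made-up sizes (24 kHz, 1024-point FFT), not taken from this repository:

import numpy
import pyworld

fs = 24000
frames = 300
f0 = numpy.full(frames, 150.0)             # Hz, float64 by default
sp = numpy.full((frames, 513), 1e-4)       # spectral envelope, (frames, fft_size // 2 + 1)
ap = numpy.full((frames, 513), 0.5)        # aperiodicity, same shape as sp
wave = pyworld.synthesize(f0, sp, ap, fs)  # float64 waveform at the default frame period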