From 83608c12e7bb28df1966cbe5b9d86a8e23175044 Mon Sep 17 00:00:00 2001
From: Hiroshiba Kazuyuki
Date: Mon, 15 Jan 2018 22:18:02 +0900
Subject: Enable super resolution
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 become_yukarin/super_resolution.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

(limited to 'become_yukarin/super_resolution.py')

diff --git a/become_yukarin/super_resolution.py b/become_yukarin/super_resolution.py
index 535af57..bdb2e61 100644
--- a/become_yukarin/super_resolution.py
+++ b/become_yukarin/super_resolution.py
@@ -1,6 +1,5 @@
 from functools import partial
 from pathlib import Path
-from typing import Optional
 
 import chainer
 import numpy
@@ -39,21 +38,28 @@ class SuperResolution(object):
 
     def convert(self, input: numpy.ndarray) -> numpy.ndarray:
         converter = partial(chainer.dataset.convert.concat_examples, padding=0)
-        inputs = converter([numpy.log(input)[:, :-1]])
+        pad = 128 - len(input) % 128
+        input = numpy.pad(input, [(0, pad), (0, 0)], mode='minimum')
+        input = numpy.log(input)[:, :-1]
+        input = input[numpy.newaxis]
+        inputs = converter([input])
 
         with chainer.using_config('train', False):
             out = self.model(inputs).data[0]
             out = out[0]
-            out[:, out.shape[1]] = out[:, -1]
+            out = numpy.pad(out, [(0, 0), (0, 1)], mode='edge')
+            out = numpy.exp(out)
+            out = out[:-pad]
 
         return out
 
     def convert_to_audio(
             self,
             input: numpy.ndarray,
             acoustic_feature: AcousticFeature,
-            sampling_rate: Optional[int] = None,
+            sampling_rate: int,
     ):
+        acoustic_feature = acoustic_feature.astype_only_float(numpy.float64)
         out = pyworld.synthesize(
             f0=acoustic_feature.f0.ravel(),
             spectrogram=input.astype(numpy.float64),
@@ -76,7 +82,7 @@ class SuperResolution(object):
             self,
             input: numpy.ndarray,
             acoustic_feature: AcousticFeature,
-            sampling_rate: Optional[int] = None,
+            sampling_rate: int,
     ):
         high = self.convert(input)
         return self.convert_to_audio(high, acoustic_feature=acoustic_feature, sampling_rate=sampling_rate)
-- 
cgit v1.2.3-70-g09d2
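
Editor's sketch of the flow this commit adds to SuperResolution.convert: pad the spectrogram's time axis to a multiple of 128 frames, take the log and drop the last frequency bin, run the network, then restore the bin, undo the log, and strip the padding. This is only an illustration of the diff, not code from the repository: fake_model stands in for the Chainer model, the 128-frame alignment is read off the "pad = 128 - len(input) % 128" line, and the example shapes (300 frames, 513 bins) are assumed.

import numpy


def fake_model(x: numpy.ndarray) -> numpy.ndarray:
    # Placeholder for the Chainer network; identity over (batch, frames, bins).
    return x


def convert_sketch(spectrogram: numpy.ndarray) -> numpy.ndarray:
    # Pad the time axis so the frame count becomes a multiple of 128,
    # filling the new frames with per-bin minimum values (mode='minimum').
    pad = 128 - len(spectrogram) % 128
    x = numpy.pad(spectrogram, [(0, pad), (0, 0)], mode='minimum')

    # Work in the log domain and drop the last frequency bin before inference.
    x = numpy.log(x)[:, :-1]
    x = x[numpy.newaxis]  # add a batch axis

    out = fake_model(x)[0]

    # Restore the dropped bin by repeating the edge, undo the log,
    # and strip the padded frames again.
    out = numpy.pad(out, [(0, 0), (0, 1)], mode='edge')
    out = numpy.exp(out)
    return out[:-pad]


# Example: a random positive "spectrogram" with 300 frames and 513 bins.
spec = numpy.random.rand(300, 513) + 1e-6
print(convert_sketch(spec).shape)  # (300, 513)

One property of the padding formula worth noting when reading the diff: when the frame count is already a multiple of 128, pad is 128 rather than 0, so a full extra block is appended and later trimmed; pad therefore never becomes 0 and out[:-pad] never degenerates into an empty slice.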