will run on windows
Hiroshiba committed Aug 24, 2019
1 parent 83fccf4 commit 9fd4ebd
Showing 6 changed files with 140 additions and 138 deletions.
4 changes: 2 additions & 2 deletions become_yukarin/dataset/dataset.py
@@ -485,7 +485,7 @@ def create(config: DatasetConfig):
     # cropping
     if config.train_crop_size is not None:
         def add_seed():
-            return LambdaProcess(lambda d, test: dict(seed=numpy.random.randint(2 ** 32), **d))
+            return LambdaProcess(lambda d, test: dict(seed=numpy.random.randint(2 ** 31), **d))

         def padding(s):
             return ChainProcess([
@@ -587,7 +587,7 @@ def create_sr(config: SRDatasetConfig):
     # cropping
     if config.train_crop_size is not None:
         def add_seed():
-            return LambdaProcess(lambda d, test: dict(seed=numpy.random.randint(2 ** 32), **d))
+            return LambdaProcess(lambda d, test: dict(seed=numpy.random.randint(2 ** 31), **d))

         def padding(s):
             return ChainProcess([
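Note on the change above: the only edit in both hunks is 2 ** 32 → 2 ** 31, and it is most likely what the commit title refers to. numpy.random.randint draws values of dtype numpy.int_, a C long, which is 64-bit on Linux/macOS but 32-bit on Windows; there an exclusive upper bound of 2 ** 32 is out of range and the call raises ValueError, while 2 ** 31 keeps every result inside [0, 2 ** 31 - 1]. A minimal sketch of the difference (legacy NumPy random API):

    import numpy

    # On Windows the first call raises
    # "ValueError: high is out of bounds for int32"; on 64-bit Linux it succeeds.
    try:
        seed = numpy.random.randint(2 ** 32)
    except ValueError:
        seed = numpy.random.randint(2 ** 31)  # in range on every platform

    numpy.random.seed(seed)

The halved range is harmless here: the value appears to be used only as a per-example seed for the random crop.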
2 changes: 1 addition & 1 deletion recipe/config_sr.json
@@ -1,6 +1,6 @@
 {
   "dataset": {
-    "input_glob": "/yukari-news-harvest/yukari-news-*.npy",
+    "input_glob": "./feature/*.npy",
     "num_test": 5,
     "input_global_noise": 3,
     "input_local_noise": 3,
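The old pattern pointed at an absolute, Unix-rooted directory that cannot exist on a typical Windows checkout; the relative glob resolves from the working directory on any OS, since Python's glob and pathlib both accept forward slashes on Windows. A quick check (assuming a ./feature directory of .npy files):

    from pathlib import Path

    # Forward slashes in the pattern are fine even on Windows.
    paths = sorted(Path('.').glob('feature/*.npy'))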
4 changes: 2 additions & 2 deletions scripts/extract_acoustic_feature.py
@@ -46,8 +46,6 @@
 parser.add_argument('--enable_overwrite', action='store_true')
 arguments = parser.parse_args()

-pprint(dir(arguments))
-
 pre_convert = arguments.pre_converter1_config is not None
 if pre_convert:
     config = create_config(arguments.pre_converter1_config)
@@ -188,6 +186,8 @@ def concatenate(arr_list):


 def main():
+    pprint(vars(arguments))
+
     paths1 = list(sorted(arguments.input1_directory.glob('*')))
     paths2 = list(sorted(arguments.input2_directory.glob('*')))
     assert len(paths1) == len(paths2)
4 changes: 2 additions & 2 deletions scripts/extract_spectrogram_pair.py
@@ -33,8 +33,6 @@
 parser.add_argument('--enable_overwrite', action='store_true')
 arguments = parser.parse_args()

-pprint(dir(arguments))
-

 def generate_file(path):
     out = Path(arguments.output_directory, path.stem + '.npy')
@@ -74,6 +72,8 @@ def generate_file(path):


 def main():
+    pprint(vars(arguments))
+
     paths = list(sorted(arguments.input_directory.glob('*')))
     arguments.output_directory.mkdir(exist_ok=True)

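Both extract scripts receive the same two-part fix. pprint(dir(arguments)) only listed attribute names, most of them methods inherited by argparse.Namespace; pprint(vars(arguments)) prints the namespace's underlying dict, i.e. each option with its parsed value. Moving the call into main() also silences plain module import, which matters once a module can be re-imported by spawned worker processes on Windows. A side-by-side illustration (the --enable_overwrite flag is taken from the diffs above):

    import argparse
    from pprint import pprint

    parser = argparse.ArgumentParser()
    parser.add_argument('--enable_overwrite', action='store_true')
    arguments = parser.parse_args(['--enable_overwrite'])

    pprint(dir(arguments))   # ['__class__', ..., 'enable_overwrite'] - names only
    pprint(vars(arguments))  # {'enable_overwrite': True} - names and values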
131 changes: 66 additions & 65 deletions train.py
@@ -14,68 +14,69 @@
 from become_yukarin.model.model import create
 from become_yukarin.updater.updater import Updater

-parser = argparse.ArgumentParser()
-parser.add_argument('config_json_path', type=Path)
-parser.add_argument('output', type=Path)
-arguments = parser.parse_args()
-
-config = create_from_json(arguments.config_json_path)
-arguments.output.mkdir(exist_ok=True)
-config.save_as_json((arguments.output / 'config.json').absolute())
-
-# model
-if config.train.gpu >= 0:
-    cuda.get_device_from_id(config.train.gpu).use()
-predictor, discriminator = create(config.model)
-models = {
-    'predictor': predictor,
-    'discriminator': discriminator,
-}
-
-# dataset
-dataset = create_dataset(config.dataset)
-train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
-test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
-train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)
-
-
-# optimizer
-def create_optimizer(model):
-    optimizer = optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.999)
-    optimizer.setup(model)
-    return optimizer
-
-
-opts = {key: create_optimizer(model) for key, model in models.items()}
-
-# updater
-converter = partial(convert.concat_examples, padding=0)
-updater = Updater(
-    loss_config=config.loss,
-    predictor=predictor,
-    discriminator=discriminator,
-    device=config.train.gpu,
-    iterator=train_iter,
-    optimizer=opts,
-    converter=converter,
-)
-
-# trainer
-trigger_log = (config.train.log_iteration, 'iteration')
-trigger_snapshot = (config.train.snapshot_iteration, 'iteration')
-
-trainer = training.Trainer(updater, out=arguments.output)
-
-ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
-trainer.extend(ext, name='test', trigger=trigger_log)
-ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
-trainer.extend(ext, name='train', trigger=trigger_log)
-
-trainer.extend(extensions.dump_graph('predictor/loss'))
-
-ext = extensions.snapshot_object(predictor, filename='predictor_{.updater.iteration}.npz')
-trainer.extend(ext, trigger=trigger_snapshot)
-
-trainer.extend(extensions.LogReport(trigger=trigger_log))
-
-trainer.run()
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('config_json_path', type=Path)
+    parser.add_argument('output', type=Path)
+    arguments = parser.parse_args()
+
+    config = create_from_json(arguments.config_json_path)
+    arguments.output.mkdir(exist_ok=True)
+    config.save_as_json((arguments.output / 'config.json').absolute())
+
+    # model
+    if config.train.gpu >= 0:
+        cuda.get_device_from_id(config.train.gpu).use()
+    predictor, discriminator = create(config.model)
+    models = {
+        'predictor': predictor,
+        'discriminator': discriminator,
+    }
+
+    # dataset
+    dataset = create_dataset(config.dataset)
+    train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
+    test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
+    train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)
+
+
+    # optimizer
+    def create_optimizer(model):
+        optimizer = optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.999)
+        optimizer.setup(model)
+        return optimizer
+
+
+    opts = {key: create_optimizer(model) for key, model in models.items()}
+
+    # updater
+    converter = partial(convert.concat_examples, padding=0)
+    updater = Updater(
+        loss_config=config.loss,
+        predictor=predictor,
+        discriminator=discriminator,
+        device=config.train.gpu,
+        iterator=train_iter,
+        optimizer=opts,
+        converter=converter,
+    )
+
+    # trainer
+    trigger_log = (config.train.log_iteration, 'iteration')
+    trigger_snapshot = (config.train.snapshot_iteration, 'iteration')
+
+    trainer = training.Trainer(updater, out=arguments.output)
+
+    ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
+    trainer.extend(ext, name='test', trigger=trigger_log)
+    ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
+    trainer.extend(ext, name='train', trigger=trigger_log)
+
+    trainer.extend(extensions.dump_graph('predictor/loss'))
+
+    ext = extensions.snapshot_object(predictor, filename='predictor_{.updater.iteration}.npz')
+    trainer.extend(ext, trigger=trigger_snapshot)
+
+    trainer.extend(extensions.LogReport(trigger=trigger_log))
+
+    trainer.run()
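This is the core Windows fix. Windows has no fork(), so multiprocessing starts children with the spawn method, which re-imports the main module; Chainer's MultiprocessIterator launches such workers. With the training code at module level, every spawned worker would re-execute the whole setup and try to start workers of its own, so wrapping the body in the standard if __name__ == '__main__': guard is required there (and harmless elsewhere). A minimal sketch of the failure mode, independent of this repo:

    import multiprocessing as mp

    def square(x):
        return x * x

    # Without this guard, creating the pool at import time would make every
    # spawned child on Windows re-run the pool creation and raise a
    # RuntimeError about starting processes before bootstrapping finished.
    if __name__ == '__main__':
        with mp.Pool(2) as pool:
            print(pool.map(square, range(4)))  # [0, 1, 4, 9]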
133 changes: 67 additions & 66 deletions train_sr.py
@@ -14,69 +14,70 @@
 from become_yukarin.model.sr_model import create_sr as create_sr_model
 from become_yukarin.updater.sr_updater import SRUpdater

-parser = argparse.ArgumentParser()
-parser.add_argument('config_json_path', type=Path)
-parser.add_argument('output', type=Path)
-arguments = parser.parse_args()
-
-config = create_from_json(arguments.config_json_path)
-arguments.output.mkdir(exist_ok=True)
-config.save_as_json((arguments.output / 'config.json').absolute())
-
-# model
-if config.train.gpu >= 0:
-    cuda.get_device_from_id(config.train.gpu).use()
-predictor, discriminator = create_sr_model(config.model)
-models = {
-    'predictor': predictor,
-    'discriminator': discriminator,
-}
-
-# dataset
-dataset = create_sr_dataset(config.dataset)
-train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
-test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
-train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)
-
-
-# optimizer
-def create_optimizer(model):
-    optimizer = optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.999)
-    optimizer.setup(model)
-    return optimizer
-
-
-opts = {key: create_optimizer(model) for key, model in models.items()}
-
-# updater
-converter = partial(convert.concat_examples, padding=0)
-updater = SRUpdater(
-    loss_config=config.loss,
-    predictor=predictor,
-    discriminator=discriminator,
-    device=config.train.gpu,
-    iterator=train_iter,
-    optimizer=opts,
-    converter=converter,
-)
-
-# trainer
-trigger_log = (config.train.log_iteration, 'iteration')
-trigger_snapshot = (config.train.snapshot_iteration, 'iteration')
-
-trainer = training.Trainer(updater, out=arguments.output)
-
-ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
-trainer.extend(ext, name='test', trigger=trigger_log)
-ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
-trainer.extend(ext, name='train', trigger=trigger_log)
-
-trainer.extend(extensions.dump_graph('predictor/loss'))
-
-ext = extensions.snapshot_object(predictor, filename='predictor_{.updater.iteration}.npz')
-trainer.extend(ext, trigger=trigger_snapshot)
-
-trainer.extend(extensions.LogReport(trigger=trigger_log))
-trainer.extend(extensions.PrintReport(['predictor/loss']))
-
-trainer.run()
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('config_json_path', type=Path)
+    parser.add_argument('output', type=Path)
+    arguments = parser.parse_args()
+
+    config = create_from_json(arguments.config_json_path)
+    arguments.output.mkdir(exist_ok=True)
+    config.save_as_json((arguments.output / 'config.json').absolute())
+
+    # model
+    if config.train.gpu >= 0:
+        cuda.get_device_from_id(config.train.gpu).use()
+    predictor, discriminator = create_sr_model(config.model)
+    models = {
+        'predictor': predictor,
+        'discriminator': discriminator,
+    }
+
+    # dataset
+    dataset = create_sr_dataset(config.dataset)
+    train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
+    test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
+    train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)
+
+
+    # optimizer
+    def create_optimizer(model):
+        optimizer = optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.999)
+        optimizer.setup(model)
+        return optimizer
+
+
+    opts = {key: create_optimizer(model) for key, model in models.items()}
+
+    # updater
+    converter = partial(convert.concat_examples, padding=0)
+    updater = SRUpdater(
+        loss_config=config.loss,
+        predictor=predictor,
+        discriminator=discriminator,
+        device=config.train.gpu,
+        iterator=train_iter,
+        optimizer=opts,
+        converter=converter,
+    )
+
+    # trainer
+    trigger_log = (config.train.log_iteration, 'iteration')
+    trigger_snapshot = (config.train.snapshot_iteration, 'iteration')
+
+    trainer = training.Trainer(updater, out=arguments.output)
+
+    ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
+    trainer.extend(ext, name='test', trigger=trigger_log)
+    ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
+    trainer.extend(ext, name='train', trigger=trigger_log)
+
+    trainer.extend(extensions.dump_graph('predictor/loss'))
+
+    ext = extensions.snapshot_object(predictor, filename='predictor_{.updater.iteration}.npz')
+    trainer.extend(ext, trigger=trigger_snapshot)
+
+    trainer.extend(extensions.LogReport(trigger=trigger_log))
+    trainer.extend(extensions.PrintReport(['predictor/loss']))
+
+    trainer.run()
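train_sr.py gets the identical guard (plus its extra PrintReport extension, which is unchanged). Invocation is unaffected; only re-imports now skip the training body. For example (the config paths are illustrative, not prescribed by the repo):

    python train.py recipe/config.json ./output
    python train_sr.py recipe/config_sr.json ./output_sr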
