from byteps.torch.ops import init, shutdown
from byteps.torch.ops import size, local_size, rank, local_rank
import threading
import logging
try:
import queue
except ImportError:
import Queue as queue
import time
import math
import torch
import byteps.torch as bps
_DistributedOptimizer = bps._DistributedOptimizer
_bps_DistributedOptimizer = bps.DistributedOptimizer
broadcast_parameters = bps.broadcast_parameters
broadcast_optimizer_state = bps.broadcast_optimizer_state
class _CrossBarrier(_DistributedOptimizer):
"""An optimizer that wraps a _DistributedOptimizer, intercepting push-pull operations.
    This class enables overlapping gradient push-pull with both backward and forward propagation while maintaining
    correct dependencies. With properly tuned system parameters, it can achieve even higher training performance
    than default BytePS. To understand the principles behind barrier crossing, see the paper:
    https://dl.acm.org/citation.cfm?id=3359642
"""
def __init__(self, model, byteps_opt, num_steps=10**6):
"""Construct a new ScheduledOptimizer, which uses byteps optimizer under the hood for averaging gradients
across all workers.
Args:
model: The training model. BytePS uses the model object to register hooks.
if args.cuda:
# Move model to GPU.
model.cuda()
# BytePS: scale learning rate by the number of GPUs.
# Gradient Accumulation: scale learning rate by batches_per_pushpull
optimizer = optim.SGD(model.parameters(),
lr=(args.base_lr *
args.batches_per_pushpull * bps.size()),
momentum=args.momentum, weight_decay=args.wd)
# BytePS: (optional) compression algorithm.
compression = bps.Compression.fp16 if args.fp16_pushpull else bps.Compression.none
# BytePS: wrap optimizer with DistributedOptimizer.
optimizer = bps.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters(),
compression=compression,
backward_passes_per_step=args.batches_per_pushpull)
# Restore from a previous checkpoint, if resume_from_epoch is specified.
# BytePS: restore on the first worker which will broadcast weights to other workers.
if resume_from_epoch > 0 and bps.rank() == 0:
filepath = args.checkpoint_format.format(epoch=resume_from_epoch)
checkpoint = torch.load(filepath)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
# BytePS: broadcast parameters & optimizer state.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)
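# A minimal sketch of a training step matching the optimizer above (assuming
# `model`, `train_loader`, and `args` from this snippet; the loss choice is
# illustrative, not part of the original). With backward_passes_per_step set to
# args.batches_per_pushpull, gradients from several mini-batches are
# accumulated locally and only pushed/pulled when optimizer.step() is called.
import torch.nn.functional as F

def train_one_epoch():
    model.train()
    optimizer.zero_grad()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        loss = F.cross_entropy(model(data), target)
        # Average the accumulated gradient over the accumulation window.
        (loss / args.batches_per_pushpull).backward()
        if (batch_idx + 1) % args.batches_per_pushpull == 0:
            optimizer.step()      # triggers the synchronized push-pull update
            optimizer.zero_grad()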
dropout=opt.dropout).to(device)
print("src_vocab_size:",opt.src_vocab_size,",tgt_vocab_size:",opt.tgt_vocab_size,",share_weight:",(opt.proj_share_weight,opt.embs_share_weight))
if hvd.local_rank() == 0:
fo = open("transformer_model.csv", "w")
for name, p in transformer.named_parameters():
if p.requires_grad:
size = 1
for s in list(p.size()):
size = size * s
print("name:",name,", size:",size)
fo.write(name+", "+str(size)+"\n")
fo.close()
torch_optimizer = optim.Adam(filter(lambda x: x.requires_grad, transformer.parameters()), betas=(0.9, 0.98), eps=1e-09)
if use_horovod > 0:
torch_optimizer = hvd.DistributedOptimizer(torch_optimizer, named_parameters=transformer.named_parameters())
hvd.broadcast_parameters(transformer.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(torch_optimizer, root_rank=0)
#print("finish hvd preparation")
#optimizer = ScheduledOptim(torch_optimizer, opt.d_model, opt.n_warmup_steps)
optimizer = torch_optimizer
'''
optimizer = ScheduledOptim(
optim.Adam(
filter(lambda x: x.requires_grad, transformer.parameters()),
betas=(0.9, 0.98), eps=1e-09),
opt.d_model, opt.n_warmup_steps)
'''
train(transformer, training_data, validation_data, optimizer, device ,opt)
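# ScheduledOptim itself is not shown in this snippet. A minimal sketch, assuming
# it implements the standard Transformer warm-up schedule
# lr = d_model^-0.5 * min(step^-0.5, step * n_warmup_steps^-1.5) on top of the
# wrapped (possibly distributed) optimizer; the class name is illustrative.
class ScheduledOptimSketch:
    def __init__(self, optimizer, d_model, n_warmup_steps):
        self._optimizer = optimizer
        self.d_model = d_model
        self.n_warmup_steps = n_warmup_steps
        self.n_steps = 0

    def zero_grad(self):
        self._optimizer.zero_grad()

    def step(self):
        # Update the learning rate of every param group, then step.
        self.n_steps += 1
        lr = self.d_model ** -0.5 * min(self.n_steps ** -0.5,
                                        self.n_steps * self.n_warmup_steps ** -1.5)
        for group in self._optimizer.param_groups:
            group['lr'] = lr
        self._optimizer.step()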
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
if use_horovod == 1:
state_dict = optimizer.state_dict()
pop_dict = {}
for index, group in enumerate(state_dict['param_groups']):
if 'schedule' in group:
pop_dict[index] = group.pop('schedule')
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
optimizer.load_state_dict(state_dict)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
for index, group in enumerate(state_dict['param_groups']):
if index in pop_dict:
group['schedule'] = pop_dict[index]
optimizer.load_state_dict(state_dict)
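# The pop/restore sequence above can be captured in a small helper. This is a
# sketch that only generalizes what the snippet does by hand: BertAdam's
# per-group 'schedule' entry is presumably popped because
# hvd.broadcast_optimizer_state cannot serialize it (hvd refers to
# horovod.torch, imported elsewhere in the surrounding script).
def broadcast_optimizer_state_skipping(optimizer, key, root_rank=0):
    state_dict = optimizer.state_dict()
    # Remove the entries the broadcast cannot handle.
    popped = {i: g.pop(key) for i, g in enumerate(state_dict['param_groups'])
              if key in g}
    optimizer.load_state_dict(state_dict)
    hvd.broadcast_optimizer_state(optimizer, root_rank=root_rank)
    # Put the popped entries back and reload the full state.
    for i, g in enumerate(state_dict['param_groups']):
        if i in popped:
            g[key] = popped[i]
    optimizer.load_state_dict(state_dict)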
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running training *****")
cudnn.benchmark = True
# Set up standard model.
model = getattr(models, args.model)(num_classes=args.num_classes)
if args.cuda:
# Move model to GPU.
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=0.01)
# BytePS: (optional) compression algorithm.
compression = bps.Compression.fp16 if args.fp16_pushpull else bps.Compression.none
# BytePS: wrap optimizer with DistributedOptimizer.
optimizer = bps.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression)
# BytePS: broadcast parameters & optimizer state.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)
# Set up fake data
datasets = []
for _ in range(100):
data = torch.rand(args.batch_size, 3, 224, 224)
target = torch.LongTensor(args.batch_size).random_() % 1000
if args.cuda:
data, target = data.cuda(), target.cuda()
datasets.append(data)
data_index = 0
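# A minimal throughput sketch using the fake data above (assuming F is
# torch.nn.functional; it reuses the last `target` from the loop since targets
# are not stored; `benchmark_step` and the iteration count are illustrative).
import time
import torch.nn.functional as F

def benchmark_step():
    global data_index
    data = datasets[data_index % len(datasets)]
    data_index += 1
    optimizer.zero_grad()
    loss = F.cross_entropy(model(data), target)
    loss.backward()
    optimizer.step()

start = time.time()
for _ in range(100):
    benchmark_step()
if bps.rank() == 0:
    print('Img/sec: %.1f' % (100 * args.batch_size / (time.time() - start)))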
model = Net()
if args.cuda:
# Move model to GPU.
model.cuda()
# BytePS: scale learning rate by the number of GPUs.
optimizer = optim.SGD(model.parameters(), lr=args.lr * bps.size(),
momentum=args.momentum)
# BytePS: (optional) compression algorithm.
compression = bps.Compression.fp16 if args.fp16_pushpull else bps.Compression.none
# BytePS: wrap optimizer with DistributedOptimizer.
optimizer = bps.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression)
# BytePS: broadcast parameters.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)
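# The train() function below assumes a sharded data loader. A minimal sketch
# (using torchvision's MNIST and torch's DistributedSampler; the dataset path
# and transform are illustrative) of how train_sampler/train_loader could be
# built so each BytePS worker sees a distinct shard:
import torchvision
train_dataset = torchvision.datasets.MNIST(
    'data-%d' % bps.rank(), train=True, download=True,
    transform=torchvision.transforms.ToTensor())
train_sampler = torch.utils.data.distributed.DistributedSampler(
    train_dataset, num_replicas=bps.size(), rank=bps.rank())
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, sampler=train_sampler)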
def train(epoch):
model.train()
# BytePS: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
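        # The snippet ends here; a typical completion of the step (assuming F
        # is torch.nn.functional and a negative log-likelihood objective,
        # which is an assumption, not part of the original) would be:
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()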