How to use the fairseq.meters.AverageMeter class in fairseq

To help you get started, we’ve selected a few fairseq examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github freewym / espresso / espresso / speech_train.py View on Github external
    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
github ecchochan / roberta-squad / fairseq_train_embed_cn.py View on Github external
    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
github ecchochan / roberta-squad / fairseq_train_imsf.py View on Github external
    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
github pytorch / translate / pytorch_translate / evals.py View on Github external
    extra_meters = defaultdict(lambda: AverageMeter())
    for sample in progress:
github ecchochan / roberta-squad / fairseq_train_embed_cn.py View on Github external
def init_meters(self, args):
    """Create the progress meters tracked during training.

    Populates ``self.meters`` with an OrderedDict mapping meter names to
    meter instances; insertion order is preserved and is the order in
    which stats are later reported.

    Args:
        args: parsed command-line namespace. Only ``args.fp16`` is read
            here, to decide whether the dynamic loss-scale meter is added.
    """
    self.meters = OrderedDict()
    # Unconditional meters, registered in display order.
    for key, make_meter in (
        ('train_loss', AverageMeter),
        ('train_nll_loss', AverageMeter),
        ('valid_loss', AverageMeter),
        ('valid_nll_loss', AverageMeter),
        ('wps', TimeMeter),        # words per second
        ('ups', TimeMeter),        # updates per second
        ('wpb', AverageMeter),     # words per batch
        ('bsz', AverageMeter),     # sentences per batch
        ('gnorm', AverageMeter),   # gradient norm
        ('clip', AverageMeter),    # % of updates clipped
        ('oom', AverageMeter),     # out of memory
    ):
        self.meters[key] = make_meter()
    if args.fp16:
        # Only meaningful when mixed-precision training is enabled.
        self.meters['loss_scale'] = AverageMeter()  # dynamic loss scale
    self.meters['wall'] = TimeMeter()             # wall time in seconds
    self.meters['train_wall'] = StopwatchMeter()  # train wall time in seconds
github KelleyYin / Cross-lingual-Summarization / Teaching-Attention / train.py View on Github external
        extra_meters = collections.defaultdict(lambda: AverageMeter())
github freewym / espresso / fairseq / trainer.py View on Github external
def init_meters(self, args):
    """Set up all meters used to report training progress.

    Builds ``self.meters`` as an OrderedDict so that reporting iterates
    the meters in the order they are registered here.

    Args:
        args: parsed command-line namespace; ``args.fp16`` controls
            whether a dynamic loss-scale meter is included.
    """
    # Base meters common to every configuration, in display order.
    base_meters = [
        ("train_loss", AverageMeter()),
        ("train_nll_loss", AverageMeter()),
        ("valid_loss", AverageMeter()),
        ("valid_nll_loss", AverageMeter()),
        ("wps", TimeMeter()),        # words per second
        ("ups", TimeMeter()),        # updates per second
        ("wpb", AverageMeter()),     # words per batch
        ("bsz", AverageMeter()),     # sentences per batch
        ("gnorm", AverageMeter()),   # gradient norm
        ("clip", AverageMeter()),    # % of updates clipped
        ("oom", AverageMeter()),     # out of memory
    ]
    self.meters = OrderedDict(base_meters)
    if args.fp16:
        # Track the dynamic loss scale only under mixed-precision training.
        self.meters["loss_scale"] = AverageMeter()  # dynamic loss scale
    self.meters["wall"] = TimeMeter()  # wall time in seconds
    self.meters["train_wall"] = StopwatchMeter()  # train wall time in seconds
github KelleyYin / Cross-lingual-Summarization / Teacher-Student / train.py View on Github external
        extra_meters = collections.defaultdict(lambda: AverageMeter())
github ecchochan / roberta-squad / fairseq_train_cn.py View on Github external
        extra_meters = collections.defaultdict(lambda: AverageMeter())
github freewym / espresso / singleprocess_train.py View on Github external
    extra_meters = collections.defaultdict(lambda: AverageMeter())
    max_update = args.max_update or math.inf