import argparse
import bz2
import gzip
import json
import logging
import re

import tokenizers


# Constructor of RegExpTokenizer, shown for reference; the class lives in the
# project's tokenizers module. The base class name below is an assumption,
# since only the super() call survives in this snippet.
class RegExpTokenizer(BaseTokenizer):
    def __init__(self, pattern=r'\w+|\S', lower=False):
        super(RegExpTokenizer, self).__init__(lower)
        self._regex = re.compile(pattern)
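
# A quick illustration (not from the original source) of what the default
# pattern matches: runs of word characters, or any single non-whitespace
# character on its own.
#   >>> re.findall(r'\w+|\S', "It's 5 o'clock.")
#   ['It', "'", 's', '5', 'o', "'", 'clock', '.']
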
# The parser setup and the cirrus_file argument are not in the snippet; they
# are reconstructed here from the args.cirrus_file / gzip usage further below.
parser = argparse.ArgumentParser()
parser.add_argument('cirrus_file', type=str,
                    help='input Wikipedia Cirrussearch dump file (.json.gz)')
parser.add_argument('out_file', type=str,
                    help='output corpus file (.txt.bz2)')
parser.add_argument('--tokenizer', choices=('regexp', 'nltk', 'mecab'),
                    default='regexp',
                    help='type of tokenizer [regexp]')
parser.add_argument('--lower', action='store_true',
                    help='lowercase words (not applied to NEs)')
parser.add_argument('--mecab_dic', type=str, default=None,
                    help='dictionary for MeCab tokenizer')
parser.add_argument('--mecab_udic', type=str, default=None,
                    help='user dictionary for MeCab tokenizer')
args = parser.parse_args()
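
# Example invocation (script and file names are hypothetical):
#   python make_corpus.py jawiki-cirrus.json.gz corpus.txt.bz2 \
#       --tokenizer mecab --lower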
if args.tokenizer == 'regexp':
    logging.info('tokenizer: RegExpTokenizer')
    tokenizer = tokenizers.RegExpTokenizer(lower=args.lower)
elif args.tokenizer == 'nltk':
    logging.info('tokenizer: NLTKTokenizer')
    tokenizer = tokenizers.NLTKTokenizer(lower=args.lower)
elif args.tokenizer == 'mecab':
    logging.info('tokenizer: MeCabTokenizer')
    logging.info(f'dictionary: {args.mecab_dic}')
    logging.info(f'user dictionary: {args.mecab_udic}')
    tokenizer = tokenizers.MeCabTokenizer(
        dic=args.mecab_dic, udic=args.mecab_udic, lower=args.lower)
else:
    # unreachable given the argparse choices above, but kept as a guard
    raise ValueError('Undefined tokenizer type.')
logging.info('generating corpus for training')
n_processed = 0
with gzip.open(args.cirrus_file, 'rt') as fi, \
        bz2.open(args.out_file, 'wt') as fo:
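    # The loop body is truncated in the original snippet. Below is a minimal
    # sketch of one plausible body, NOT the project's actual code. It assumes
    # the Cirrussearch dump format (JSON lines alternating between index
    # metadata and page content) and that the tokenizer exposes a tokenize()
    # method returning a list of strings; both are assumptions.
    for line in fi:
        page = json.loads(line)
        if 'index' in page:  # skip the metadata line of each pair
            continue
        text = page.get('text', '')
        if not text:
            continue
        tokens = tokenizer.tokenize(text)  # tokenize() is an assumed method name
        fo.write(' '.join(tokens) + '\n')
        n_processed += 1
        if n_processed % 10000 == 0:
            logging.info(f'processed: {n_processed}')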