assert (
    tokenize("Па, има ту много ствари које не схваташ.", 'sr') ==
    ['pa', 'ima', 'tu', 'mnogo', 'stvari', 'koje', 'ne', 'shvataš']
)
assert (
    tokenize("Pa, ima tu mnogo stvari koje ne shvataš.", 'sr') ==
    ['pa', 'ima', 'tu', 'mnogo', 'stvari', 'koje', 'ne', 'shvataš']
)

# I don't have examples of complete sentences in Azerbaijani that are
# naturally in Cyrillic, because it turns out everyone writes Azerbaijani
# in Latin letters on the Internet, _except_ sometimes for Wiktionary.
# So here are some individual words.

# 'library' in Azerbaijani Cyrillic
assert preprocess_text('китабхана', 'az') == 'kitabxana'
assert preprocess_text('КИТАБХАНА', 'az') == 'kitabxana'
assert preprocess_text('KİTABXANA', 'az') == 'kitabxana'

# 'scream' in Azerbaijani Cyrillic
assert preprocess_text('бағырты', 'az') == 'bağırtı'
assert preprocess_text('БАҒЫРТЫ', 'az') == 'bağırtı'
assert preprocess_text('BAĞIRTI', 'az') == 'bağırtı'
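Because tokenize() (excerpted in the next snippet) runs preprocess_text() on its input before splitting it, the same Azerbaijani transliteration should carry through to tokenization. A minimal sketch, assuming the default regex tokenizer keeps each single word intact:

assert tokenize('китабхана', 'az') == ['kitabxana']
assert tokenize('БАҒЫРТЫ', 'az') == ['bağırtı']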
Instead, it will use the large wordlist packaged with the Jieba tokenizer,
and it will leave Traditional Chinese characters as is. This will probably
give more accurate tokenization, but the resulting tokens won't necessarily
have word frequencies that can be looked up.
If you end up seeing tokens that are entire phrases or sentences glued
together, that probably means you passed in CJK text with the wrong
language code.
"""
# Use globals to load CJK tokenizers on demand, so that we can still run
# in environments that lack the CJK dependencies
global _mecab_tokenize, _jieba_tokenize
language = langcodes.get(lang)
info = get_language_info(language)
text = preprocess_text(text, language)
if info['tokenizer'] == 'mecab':
    from wordfreq.mecab import mecab_tokenize as _mecab_tokenize
    # Get just the language code out of the Language object, so we can
    # use it to select a MeCab dictionary
    tokens = _mecab_tokenize(text, language.language)
    if not include_punctuation:
        tokens = [token for token in tokens if not PUNCT_RE.match(token)]
elif info['tokenizer'] == 'jieba':
    from wordfreq.chinese import jieba_tokenize as _jieba_tokenize
    tokens = _jieba_tokenize(text, external_wordlist=external_wordlist)
    if not include_punctuation:
        tokens = [token for token in tokens if not PUNCT_RE.match(token)]
else:
    # Assumed continuation: the default case, for languages without a
    # dedicated CJK tokenizer, falls back to the regex-based simple_tokenize
    tokens = simple_tokenize(text, include_punctuation=include_punctuation)

return tokens
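The docstring above explains the external_wordlist option: with it, the Jieba tokenizer uses its own packaged wordlist and leaves Traditional Chinese characters as is, which can segment text more naturally but may produce tokens with no frequency entry. A minimal usage sketch (no specific token boundaries are asserted, since they depend on the wordlists installed):

from wordfreq import tokenize, word_frequency

text = '谢谢你的帮助'  # 'thank you for your help'

# Default behavior: tokens are chosen so their frequencies can be looked up
default_tokens = tokenize(text, 'zh')

# Jieba's packaged wordlist: possibly better segmentation, but some tokens
# may not have a frequency entry
jieba_tokens = tokenize(text, 'zh', external_wordlist=True)

for token in default_tokens:
    print(token, word_frequency(token, 'zh'))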