import clevercsv

def test_read_bigfield(self):
    # Save the current limit so it can be restored afterwards.
    limit = clevercsv.field_size_limit()
    try:
        size = 500
        bigstring = "X" * size
        bigline = "%s,%s" % (bigstring, bigstring)
        self._read_test([bigline], [[bigstring, bigstring]])
        clevercsv.field_size_limit(size)
        self._read_test([bigline], [[bigstring, bigstring]])
        self.assertEqual(clevercsv.field_size_limit(), size)
        # Shrink the limit below the field size; reading must now fail.
        clevercsv.field_size_limit(size - 1)
        self.assertRaises(clevercsv.Error, self._read_test, [bigline], [])
        self.assertRaises(TypeError, clevercsv.field_size_limit, None)
        self.assertRaises(TypeError, clevercsv.field_size_limit, 1, None)
    finally:
        clevercsv.field_size_limit(limit)
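
The test relies on the get/set contract of clevercsv.field_size_limit, which mirrors csv.field_size_limit from the standard library: called without an argument it returns the current limit, and called with a new size it installs that limit and returns the previous one. A minimal sketch of that contract (the assert lines are illustrative, not part of the library):

import clevercsv

current = clevercsv.field_size_limit()       # read the active limit
previous = clevercsv.field_size_limit(1000)  # install a new limit
assert previous == current                   # the old limit is returned
clevercsv.field_size_limit(previous)         # restore the original limit
assert clevercsv.field_size_limit() == current
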
def detect_consistency_dialects(data, dialects, verbose=False):
    """Wrapper for dialect detection with the consistency measure.

    This function takes a list of dialects to consider.
    """
    log = lambda *a, **kw: print(*a, **kw) if verbose else None
    log("Considering %i dialects." % len(dialects))
    # Raise the field size limit so no candidate parse fails on a large
    # field; field_size_limit returns the previous limit, which is
    # restored once the scores have been computed.
    old_limit = field_size_limit(len(data) + 1)
    scores = consistency_scores(data, dialects, skip=True, logger=log)
    H = get_best_set(scores)
    result = break_ties(data, H)
    field_size_limit(old_limit)
    return result
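
For context, a usage sketch: the candidate list is typically built from clevercsv's SimpleDialect objects, and data is the raw CSV text (its length is what the field size limit is raised to above). The sample text and dialect values here are illustrative assumptions, not taken from the original snippet:

from clevercsv.dialect import SimpleDialect

data = "a;b;c\n1;2;3\n4;5;6\n"
candidates = [
    SimpleDialect(delimiter=",", quotechar="", escapechar=""),
    SimpleDialect(delimiter=";", quotechar="", escapechar=""),
]
best = detect_consistency_dialects(data, candidates, verbose=True)
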
def __iter__(self):
    self.parser_gen = Parser(
        self.csvfile,
        delimiter=self.dialect.delimiter,
        quotechar=self.dialect.quotechar,
        escapechar=self.dialect.escapechar,
        field_limit=field_size_limit(),
        strict=self.dialect.strict,
    )
    return self
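
Note that __iter__ reads the field size limit once, when iteration starts, so any call to field_size_limit must happen before the loop begins. A hedged end-to-end sketch, assuming the drop-in clevercsv.reader interface that mirrors csv.reader (the file name is hypothetical):

import clevercsv

clevercsv.field_size_limit(1_000_000)  # raise the limit before iterating

with open("data.csv", "r", newline="") as fp:
    reader = clevercsv.reader(fp, delimiter=",", quotechar='"')
    for row in reader:
        print(row)
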