# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# TODO: check if it impacts in memory usage.
# TODO: may add option to change it by passing a parameter to import/export.
# Maximum size (in bytes) allowed for a single CSV field before unicodecsv
# raises an error. 16 MiB -- far above the library default -- so very wide
# fields in imported files do not abort the import.
MAX_CSV_FIELD_SIZE = 16 * 1024 * 1024  # == 16777216

unicodecsv.field_size_limit(MAX_CSV_FIELD_SIZE)
def fix_dialect(dialect):
    """Normalize quirks of a sniffed CSV dialect, mutating it in place.

    Two fixes are applied:
    - guarantee an escaping mechanism: when neither doublequote nor an
      escapechar is configured, switch doublequote on;
    - work around the Sniffer mis-detecting "'" as the quote character
      under minimal quoting, where '"' is the sane choice.
    """
    can_escape = dialect.doublequote or dialect.escapechar is not None
    if not can_escape:
        dialect.doublequote = True

    misdetected_quote = (
        dialect.quoting == unicodecsv.QUOTE_MINIMAL and dialect.quotechar == "'"
    )
    if misdetected_quote:
        dialect.quotechar = '"'
class excel_semicolon(unicodecsv.excel):
    """Excel dialect variant that uses ';' as the field delimiter
    (common in locales where ',' is the decimal separator)."""

    delimiter = ";"


unicodecsv.register_dialect("excel-semicolon", excel_semicolon)
if six.PY2:
def discover_dialect(sample, encoding=None, delimiters=(b",", b";", b"\t", b"|")):
"""Discover a CSV dialect based on a sample size.
`encoding` is not used (Python 2)
"""
try:
dialect = sniffer.sniff(sample, delimiters=delimiters)
except unicodecsv.Error: # Couldn't detect: fall back to 'excel'
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.stream = f
self.writer = writer = csv.writer(f, dialect=dialect, **kwds)
self.lock = threading.RLock()
import unicodecsv as csv
class DocdataDialect(csv.excel):
    """CSV dialect for Docdata files: Excel defaults, tab-delimited."""

    delimiter = "\t"
def discover_dialect(sample, encoding=None, delimiters=(b",", b";", b"\t", b"|")):
    """Infer the CSV dialect used by the byte string *sample*.

    *encoding* is accepted for interface compatibility but ignored
    (Python 2). When sniffing fails the standard 'excel' dialect is
    used instead; either way the result is normalized with
    fix_dialect() before being returned.
    """
    try:
        guessed = sniffer.sniff(sample, delimiters=delimiters)
    except unicodecsv.Error:
        # Sniffer gave up -- assume plain Excel-style CSV.
        guessed = unicodecsv.excel
    fix_dialect(guessed)
    return guessed
def render_to_csv(self, context):
    """Render the per-event financial statement as an HTTP CSV attachment.

    NOTE(review): this chunk appears truncated -- the code that writes
    `header_list` and the statement rows into `writer` is not visible here.
    """
    # presumably the statement data rendered below this visible span -- confirm
    statement = context['statement']
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="financialStatementByEvent.csv"'
    writer = csv.writer(response, csv.excel)
    # UTF-8 BOM: optional, but Excel needs it to open a UTF-8 file properly.
    response.write(u'\ufeff'.encode('utf8'))
    # Translated column headers, one entry per CSV column.
    header_list = [
        _('Event'),
        _('Month'),
        _('Registrations: Total'),
        _('Registrations: Leads'),
        _('Registrations: Follows'),
        _('Revenues: Gross'),
        _('Revenues: Net'),
        _('Expenses: Instruction'),
        _('Expenses: Venue'),
        _('Expenses: Other'),
        _('Expenses: Total'),
        _('Net Profit'),
    ]
def getRevenueItemsCSV(queryset):
    """Export the revenue items in *queryset* as an HTTP CSV attachment
    named "revenueHistory.csv".

    NOTE(review): this chunk is truncated -- the body of the `for` loop
    that writes one row per item is not visible here.
    """
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="revenueHistory.csv"'
    writer = csv.writer(response, csv.excel)
    # UTF-8 BOM: optional, but Excel needs it to open a UTF-8 file properly.
    response.write(u'\ufeff'.encode('utf8'))
    # Translated column headers, one entry per CSV column.
    header_list = [
        _('Description'),
        _('Revenue Category'),
        _('Gross Total (Pre-Discounts & Vouchers)'),
        _('Net Total'),
        _('Received From'),
        _('Registration'),
        _('Event'),
        _('Received'),
        _('Received Date')
    ]
    writer.writerow(header_list)
    for x in queryset:  # loop body not visible in this chunk
def getExpenseItemsCSV(queryset, scope='instructor'):
    """Export the expense items in *queryset* as an HTTP CSV attachment.

    The download filename depends on *scope*: "paymentHistory.csv" for
    'instructor', "expenseHistory.csv" otherwise.

    NOTE(review): this chunk is truncated -- the code that writes
    `header_list` and the data rows is not visible here.
    """
    response = HttpResponse(content_type='text/csv')
    if scope == 'instructor':
        response['Content-Disposition'] = 'attachment; filename="paymentHistory.csv"'
    else:
        response['Content-Disposition'] = 'attachment; filename="expenseHistory.csv"'
    writer = csv.writer(response, csv.excel)
    # UTF-8 BOM: optional, but Excel needs it to open a UTF-8 file properly.
    response.write(u'\ufeff'.encode('utf8'))
    # Translated column headers, one entry per CSV column.
    header_list = [
        _('Description'),
        _('Expense Category'),
        _('Hours'),
        _('Wage Rate'),
        _('Total Payment'),
        _('Is Reimbursement'),
        _('Submission Date'),
        _('Event'),
        _('Approved'),
        _('Paid'),
        _('Payment Date'),
    ]
decoded = sample.decode(encoding)
except UnicodeDecodeError as exception:
_, _, _, pos, error = exception.args
if error == "unexpected end of data" and pos == len(sample):
sample = sample[:-1]
else:
raise
else:
finished = True
try:
dialect = sniffer.sniff(decoded, delimiters=delimiters)
except unicodecsv.Error: # Couldn't detect: fall back to 'excel'
dialect = unicodecsv.excel
fix_dialect(dialect)
return dialect
def unicode_csv_reader(csv_data, dialect=csv.excel, charset='utf-8', **kwargs):
    """Generator over *csv_data* that yields unicode rows.

    Decoding from *charset* is delegated to unicodecsv's reader via its
    `encoding` keyword; any extra keyword arguments are passed through.
    (Explicit loop rather than `yield from` to keep Python 2 support.)
    """
    decoded_rows = csv.reader(csv_data, dialect=dialect, encoding=charset, **kwargs)
    for record in decoded_rows:
        yield record
def export_to_csv(
table,
filename_or_fobj=None,
encoding="utf-8",
dialect=unicodecsv.excel,
batch_size=100,
callback=None,
*args,
**kwargs
):
"""Export a `rows.Table` to a CSV file.
If a file-like object is provided it MUST be in binary mode, like in
`open(filename, mode='wb')`.
If not filename/fobj is provided, the function returns a string with CSV
contents.
"""
# TODO: will work only if table.fields is OrderedDict
# TODO: should use fobj? What about creating a method like json.dumps?