# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def load_from_sheets(self, sheets):
    """
    Load content from existing sheets

    :param dict sheets: a dictionary of sheets. Each sheet is
                        a list of lists
    """
    if sheets is None:
        return
    sheet_names = sheets.keys()
    if not isinstance(sheets, compact.OrderedDict):
        # the caller did not care about ordering, so fall back to
        # alphabetical order for determinism
        sheet_names = sorted(sheet_names)
    for sheet_name in sheet_names:
        raw = sheets[sheet_name]
        if isinstance(raw, Sheet):
            sheet = raw
            sheet.name = sheet_name
        else:
            # raw is a plain two-dimensional array
            sheet = Sheet(raw, sheet_name)
        # __sheets preserves the chosen sheet order
        self.__sheets.update({sheet_name: sheet})
        # expose each sheet as an attribute for convenient access
        self.__dict__[sheet_name.replace(" ", "_")] = sheet
    self.__name_array = list(self.__sheets.keys())
def __init__(self, filename, file_content=None, **keywords):
    """Open an Excel workbook with xlrd and cache every sheet.

    :param str filename: path to the workbook
    :param file_content: optional in-memory workbook bytes, passed
        straight to xlrd's ``file_contents`` argument
    """
    self.workbook = xlrd.open_workbook(filename, file_contents=file_content)
    # name -> 2-D array, in the workbook's own sheet order
    self.mysheets = OrderedDict(
        (sheet_name,
         to_array(XLSheet(self.workbook.sheet_by_name(sheet_name))))
        for sheet_name in self.workbook.sheet_names()
    )
def merge_readers(reader_array, outfilename=DEFAULT_OUT_FILE):
    """merge many readers

    With FilterableReader and SeriesReader, you can do custom filtering

    :param str outfilename: save the sheet as
    """
    # refuse to overwrite an existing output file
    if os.path.exists(outfilename):
        raise NotImplementedError(MESSAGE_WARNING)
    merged = OrderedDict()
    for a_reader in reader_array:
        merged.update(a_reader.dict)
    save_as(dest_file_name=outfilename, adict=merged)
# NOTE(review): fragment of a larger save-book-to-SQL routine -- names such
# as `inits`, `book`, `tables`, `mapdicts`, `session`, `thebook` and
# `self._file_type` are bound earlier, outside this excerpt; confirm
# against the enclosing method.
initializers = inits
# one header (column-name) list per sheet of the source book
colnames_array = common.get_book_headers_in_array(book)
# default to "no initializer" / "no column mapping" per target table
if initializers is None:
initializers = [None] * len(tables)
if mapdicts is None:
mapdicts = [None] * len(tables)
# pair each table with its headers, its mapping dict and its initializer
scattered = zip(tables, colnames_array, mapdicts, initializers)
importer = sql.SQLTableImporter(session)
for each_table in scattered:
# each_table = (table, column_names, column_name_mapping_dict, row_initializer)
adapter = sql.SQLTableImportAdapter(each_table[0])
adapter.column_names = each_table[1]
adapter.column_name_mapping_dict = each_table[2]
adapter.row_initializer = each_table[3]
importer.append(adapter)
to_store = OrderedDict()
for sheet in thebook:
# due book.to_dict() brings in column_names
# which corrupts the data
to_store[sheet.name] = sheet.get_internal_array()
# hand the raw sheet arrays to the SQL importer
save_data(importer, to_store, file_type=self._file_type, **keywords)
def _convert_dict_to_ordered_dict(the_dict):
    """Return an OrderedDict holding ``the_dict``'s items in sorted-key order.

    :param dict the_dict: the dictionary to convert
    :returns: a :class:`collections.OrderedDict` whose iteration order
        is the sorted order of the keys
    """
    # sorted() accepts any iterable (including Python 3 key views), so the
    # PY2-conditional list() conversion the original did was redundant.
    return OrderedDict((key, the_dict[key]) for key in sorted(the_dict))
[
["Column 1", "Column 2", "Column 3"],
[1, 5, 9],
[2, 6, 10],
[3, 7, 11],
['', 8, 12],
['', '', 13]
]
:param dict the_dict: the dictionary to be converted.
:param bool with_keys: to write the keys as the first row or not
"""
# NOTE(review): body of a generator whose `def` line and docstring opening
# are above this excerpt; it yields the dict's values column-by-column.
keys = the_dict.keys()
if not PY2:
# under Python 3, keys() returns a view; materialise it as a list
keys = list(keys)
if not isinstance(the_dict, OrderedDict):
# plain dicts get deterministic alphabetical column order
keys = sorted(keys)
if with_keys:
# emit the header row first
yield keys
sorted_values = (the_dict[key] for key in keys)
# pad shorter columns with '' so every yielded row has equal length
for row in zip_longest(*sorted_values, fillvalue=''):
yield list(row)
def __init__(self, session=None, tables=None):
    """Read a set of SQL tables into an ordered name -> rows mapping.

    :param session: a database session passed through to SQLTableReader
    :param tables: an iterable of tables to read; ``None`` now means
        "no tables" (previously the documented default crashed with a
        TypeError when iterated)
    """
    self.my_sheets = OrderedDict()
    # `tables or ()` guards the default None, which the original
    # for-loop would have rejected with a TypeError
    for table in tables or ():
        reader = SQLTableReader(session, table)
        self.my_sheets[reader.name] = reader.to_array()
def to_dict(self, row=False):
    """Returns a dictionary"""
    result = compact.OrderedDict()
    if len(self.colnames) > 0 and row is False:
        # column-wise view: merge each named column into the result
        for named_column in self.named_columns():
            result.update(named_column)
    elif len(self.rownames) > 0:
        # row-wise view: merge each named row into the result
        for named_row in self.named_rows():
            result.update(named_row)
    else:
        raise NotImplementedError("Not implemented")
    return result
def xto_dict(an_object):
    """convert a reader iterator to a dictionary

    dict rows are merged into the result, Sheet rows are stored under
    their sheet name, and any other row is stored under a generated
    ``Series_<n>`` key.

    :param an_object: an iterable of rows
    :returns: a :class:`collections.OrderedDict` of the accumulated content
    """
    the_dict = OrderedDict()
    series = "Series_%d"
    count = 1
    for row in an_object:
        # isinstance instead of `type(row) == dict` so dict subclasses
        # (e.g. an OrderedDict row) are merged rather than misfiled as
        # a generated series
        if isinstance(row, dict):
            the_dict.update(row)
        elif isinstance(row, Sheet):
            the_dict.update({row.name: row.to_array()})
        else:
            key = series % count
            the_dict.update({key: row})
            count += 1
    return the_dict