# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_isave_to_database(self):
    """Round-trip check: save input rows into the database, read them back.

    Rebuilds the schema, saves ``self.testfile`` through ``TestInput``,
    then asserts the rows fetched via ``pe.get_array`` equal ``self.data``.
    """
    # Start from a clean schema so earlier runs cannot interfere.
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    self.session = Session()
    test_input = TestInput()
    test_input.isave_to_database(
        file_name=self.testfile, session=self.session, table=Signature
    )
    fetched = pe.get_array(session=self.session, table=Signature)
    assert fetched == self.data
    self.session.close()
def test_get_array_from_dict(self):
    """A column-oriented dict should round-trip into the expected row array."""
    columns = {"X": [1, 4], "Y": [2, 5], "Z": [3, 6]}
    eq_(pe.get_array(adict=columns), self.test_data)
def test_download(self):
    """The /download endpoint must serve CSV that parses into the known rows.

    Fix: the ``assertEqual`` call was never closed in the visible source
    (truncated); the closing parenthesis is restored here.
    """
    response = self.app.get("/download")
    ret = pe.get_array(file_type="csv", file_content=response.data)
    print(ret)
    self.assertEqual(
        ret,
        [
            ["REVIEW_DATE", "AUTHOR", "ISBN", "DISCOUNTED_PRICE"],
            ["1985/01/21", "Douglas Adams", "0345391802", 5.95],
            ["1990/01/12", "Douglas Hofstadter", "0465026567", 9.95],
            [
                "1998/07/15",
                'Timothy "The Parser" Campbell',
                "0968411304",
                18.99,
            ],
            ["1999/12/03", "Richard Friedman", "0060630353", 5.95],
            ["2004/10/04", "Randel Helms", "0879755725", 4.5],
        ],
    )
def read_deepsoil_results(name):
    """Load a DEEPSOIL result workbook and return its series as float arrays.

    :param name: workbook basename (without extension) located under
                 ``FPATH_DATA``; ``<name>.xlsx`` is read.
    :returns: dict with keys ``'time_series'`` and ``'resp_spec'``, each a
              dict of numpy float arrays keyed by series name.
    """
    data = pyexcel.get_array(file_name=str(FPATH_DATA / (name + '.xlsx')))
    # Label the record fields A, B, C, ... so they line up with the
    # spreadsheet's column letters.
    names = ','.join(string.ascii_uppercase[:len(data[0])])
    records = np.rec.fromrecords(data, names=names)

    def extract_cols(records, cols, first, last, names):
        # Slice rows [first:last) of each lettered column, coercing to float
        # (row 0 is the header, hence first=1 below).
        return {
            name: records[col][first:last].astype(float)
            for col, name in zip(cols, names)
        }

    d = dict()
    # Read the time series
    d['time_series'] = extract_cols(
        records, 'ABCDE', 1, 11800,
        ['time', 'accel', 'strain', 'stress', 'arias_int'])
    # Read the response spectrum
    d['resp_spec'] = extract_cols(records, 'GH', 1, 114, ['period', 'psa'])
    # NOTE(review): the visible source was truncated here; returning the
    # assembled dict is the evident intent — confirm against the original.
    return d
["id", "name"],
[1, "News"],
[2, "Sports"]
]
for upload_file_type in ['xls', 'ods']:
with app.app_context():
db.drop_all()
db.create_all()
print("Uploading %s" % upload_file_type)
file_name = "test.%s" % upload_file_type
io = pe.save_as(array=array, dest_file_type=upload_file_type)
response = self.app.post('/upload/categories',
buffered=True,
data={"file": (io, file_name)},
content_type="multipart/form-data")
ret = pe.get_array(file_type="xls", file_content=response.data)
assert array == ret
def get_array(self, **keywords):
    """
    Get a list of lists from the file
    :param sheet_name: For an excel book, there could be multiple
                       sheets. If it is left unspecified, the
                       sheet at index 0 is loaded. For 'csv',
                       'tsv' file, *sheet_name* should be None anyway.
    :param keywords: additional key words
    :returns: A list of lists
    """
    # Normalize the caller's keywords into pyexcel parameters, then delegate.
    return pe.get_array(**self.get_params(**keywords))
def load_feeder_info_from_file(path):
# Read from local file
print('Fetching feeder data from: {}'.format(path))
for row in pyexcel.get_array(file_name=path, start_row=1): # skip header
if(row[0] != "Stop"):
# Add a new feeder using these values
available_feeders.append(Feeder(feeder_ID=row[1],
device_name=clear_utf8_characters(row[2]),
stack_x_offset=stof(row[3]),
stack_y_offset=stof(row[4]),
height=stof(row[5]),
speed=stoi(row[6]),
head=stoi(row[7]),
angle_compensation=stoi(row[8]),
feed_spacing=stoi(row[9]),
place_component=(row[10] == 'Y'),
check_vacuum=(row[11] == 'Y'),
use_vision=(row[12] == 'Y'),
centroid_correction_x=stof(row[13]),
centroid_correction_y=stof(row[14]),