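# The test methods below are excerpted from a unittest.TestCase for the quandl
# Python client's MergedDataset model. They rely on fixtures and imports defined
# outside this excerpt: mocked dataset objects (self.oil_obj, self.data_list_obj),
# the MergedDataset, MergedDataList, Data and get() names, plus six, datetime and
# mock's patch/call helpers.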

def test_get_merged_dataset_data_returns_specified_columns(self):
    data = MergedDataset(
        [('NSE/OIL', {'column_index': [1, 2]}),
         ('SINGLE/COLUMN', {'column_index': [1]}),
         ('WIKI/MSFT')]).data()
    actual = data.to_pandas().columns.tolist()
    expected = [six.u('NSE/OIL - column.1'),
                six.u('NSE/OIL - column.2'),
                six.u('SINGLE/COLUMN - column.1'),
                six.u('WIKI/MSFT - column.1'),
                six.u('WIKI/MSFT - column.2'),
                six.u('WIKI/MSFT - column.3')]
    six.assertCountEqual(self, actual, expected)

def test_get_merged_dataset_data_to_list(self):
    data = MergedDataset(
        [('NSE/OIL', {'column_index': [1, 2]}),
         ('SINGLE/COLUMN', {'column_index': [1]}),
         'WIKI/MSFT']).data()
    results = data.to_list()
    # each row holds the date, NSE/OIL's two columns, SINGLE/COLUMN's one column,
    # and all 3 WIKI/MSFT columns
    expected = [[datetime.datetime(2015, 7, 11, 0, 0), 444.3, 10, 444.3, 444.3, 10, 3],
                [datetime.datetime(2015, 7, 13, 0, 0), 433.3, 4, 433.3, 433.3, 4, 3],
                [datetime.datetime(2015, 7, 14, 0, 0), 437.5, 3, 437.5, 437.5, 3, 3],
                [datetime.datetime(2015, 7, 15, 0, 0), 440.0, 2, 440.0, 440.0, 2, 3]]
    for index, expected_item in enumerate(expected):
        six.assertCountEqual(self, expected_item, results[index])

def test_sets_column_index_on_each_dataset(self):
    md = MergedDataset(
        [('NSE/OIL', {'column_index': [1, 2]}),
         ('WIKI/AAPL', {'column_index': [1]}),
         ('WIKI/MSFT')])
    md.data_fields()
    six.assertCountEqual(self, [1, 2], md._datasets[0].requested_column_indexes)
    six.assertCountEqual(self, [1], md._datasets[1].requested_column_indexes)
    six.assertCountEqual(self, [], md._datasets[2].requested_column_indexes)

# NOTE: the @patch decorator that injects `mock` is not shown in this excerpt.
def test_merged_dataset_calls_merged_dataset_get_dataset(self, mock):
    mock.return_value = self.oil_obj
    md = MergedDataset(
        [('NSE/OIL', {'column_index': [1, 2]}),
         ('WIKI/AAPL', {'column_index': [1]}),
         ('WIKI/MSFT')])
    md.data_fields()
    # each dataset spec should be forwarded verbatim to the patched target
    expected_calls = [
        call(('NSE/OIL', {'column_index': [1, 2]})),
        call(('WIKI/AAPL', {'column_index': [1]})),
        call('WIKI/MSFT')
    ]
    self.assertEqual(mock.call_count, 3)
    for index, expected in enumerate(expected_calls):
        self.assertEqual(mock.mock_calls[index], expected)

@patch.object(MergedDataset, 'data')
def test_query_params_are_formed_with_old_arg_names(self, mock_method):
    get(['WIKI/AAPL.1', 'WIKI/MSFT.2', 'NSE/OIL'],
        authtoken='authtoken', trim_start='2001-01-01',
        trim_end='2010-01-01', collapse='annual',
        transformation='rdiff', rows=4, sort_order='desc')
    self.assertEqual(mock_method.call_count, 1)
    # legacy names (trim_start, trim_end, transformation, sort_order) should be
    # translated into the newer start_date/end_date/transform/order query params
    self.assertEqual(mock_method.mock_calls[0],
                     call(handle_not_found_error=True, handle_column_not_found=True,
                          params={'start_date': '2001-01-01', 'end_date': '2010-01-01',
                                  'collapse': 'annual', 'transform': 'rdiff',
                                  'rows': 4, 'order': 'desc'}))

def test_get_merged_dataset_data_returns_correct_types(self):
    data = MergedDataset(
        [('NSE/OIL', {'column_index': [1, 2]}),
         ('WIKI/AAPL', {'column_index': [1]}),
         ('WIKI/MSFT')]).data()
    self.assertIsInstance(data, MergedDataList)
    self.assertIsInstance(data[0], Data)

def test_merged_dataset_oldest_available_date(self):
    md = MergedDataset(
        [('NSE/OIL', {'column_index': [1, 2]}),
         ('WIKI/AAPL', {'column_index': [1]}),
         ('WIKI/MSFT')])
    self.assertEqual(md.oldest_available_date, datetime.date(2013, 1, 1))

def test_merged_dataset_newest_available_date(self):
    md = MergedDataset(
        [('NSE/OIL', {'column_index': [1, 2]}),
         ('WIKI/AAPL', {'column_index': [1]}),
         ('WIKI/MSFT')])
    self.assertEqual(md.newest_available_date, datetime.date(2015, 7, 30))

# NOTE: the @patch decorator that injects `mock_method` is not shown in this excerpt;
# judging by the assertions, it patches the data() call on the underlying datasets.
def test_data_forwards_requests_to_datset_data(self, mock_method):
    mock_method.return_value = self.data_list_obj
    MergedDataset(
        ['NSE/OIL', 'WIKI/AAPL',
         'WIKI/MSFT']).data(params={'start_date': '2015-07-01'})
    # one forwarded call per underlying dataset, each with the same params
    self.assertEqual(mock_method.call_count, 3)
    for actual in mock_method.mock_calls:
        self.assertEqual(actual, call(params={'start_date': '2015-07-01'}))
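
# A minimal usage sketch of the API exercised above, not part of the test suite.
# It assumes the quandl package is installed and that YOUR_API_KEY is a valid
# API token; the dataset codes and legacy argument names are taken directly from
# test_query_params_are_formed_with_old_arg_names, where they are translated into
# the newer start_date/end_date/transform/order query parameters.
import quandl

# Column 1 of WIKI/AAPL, column 2 of WIKI/MSFT, and every column of NSE/OIL,
# merged into one pandas DataFrame with "CODE - column name" column labels.
merged = quandl.get(['WIKI/AAPL.1', 'WIKI/MSFT.2', 'NSE/OIL'],
                    authtoken='YOUR_API_KEY',  # placeholder token (assumption)
                    trim_start='2001-01-01', trim_end='2010-01-01',
                    collapse='annual', transformation='rdiff',
                    rows=4, sort_order='desc')
print(merged.columns.tolist())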