# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing `def` (a result-fetch helper that
# receives `next_token`) lies outside this chunk, and the original
# indentation has been stripped. Code left byte-identical; comments only.
# Results are only readable once the query has reached SUCCEEDED.
if self._query_execution.state != AthenaQueryExecution.STATE_SUCCEEDED:
raise ProgrammingError('QueryExecutionState is not SUCCEEDED.')
# Build the GetQueryResults request; MaxResults pages by the cursor's
# configured arraysize.
request = {
'QueryExecutionId': self._query_execution.query_id,
'MaxResults': self._arraysize,
}
# A NextToken is only present when continuing a previous page.
if next_token:
request.update({'NextToken': next_token})
try:
# retry_api_call wraps the boto3 call with the cursor's retry policy.
response = retry_api_call(self._connection.client.get_query_results,
config=self._retry_config,
logger=_logger,
**request)
except Exception as e:
_logger.exception('Failed to fetch result set.')
# Re-raise as the DB-API error type, chaining the original cause.
raise_from(OperationalError(*e.args), e)
else:
# Success path: hand back the raw API response for the caller to parse.
return response
def execute(self, operation, parameters=None, work_group=None, s3_staging_dir=None,
            cache_size=0):
    """Run *operation* on Athena and expose the result as a pandas result set.

    Blocks (via ``_poll``) until the query reaches a terminal state.
    Raises ``OperationalError`` with the state-change reason when the
    query finishes in any state other than SUCCEEDED. Returns ``self``
    so the call can be chained DB-API style.
    """
    self._reset_state()
    self._query_id = self._execute(operation,
                                   parameters=parameters,
                                   work_group=work_group,
                                   s3_staging_dir=s3_staging_dir,
                                   cache_size=cache_size)
    execution = self._poll(self._query_id)
    # Guard clause: anything but SUCCEEDED (FAILED, CANCELLED) is an error.
    if execution.state != AthenaQueryExecution.STATE_SUCCEEDED:
        raise OperationalError(execution.state_change_reason)
    self._result_set = AthenaPandasResultSet(
        self._connection, self._converter, execution, self.arraysize,
        self._retry_config)
    return self
def _as_pandas(self):
    """Download the query's CSV output from S3 and parse it into a DataFrame.

    An object with zero ``ContentLength`` yields an empty DataFrame so
    that DDL statements (which produce no rows) still succeed. Raises
    ``ProgrammingError`` when no output location is set, and
    ``OperationalError`` when the S3 download ultimately fails.
    """
    import pandas as pd
    if not self.output_location:
        raise ProgrammingError('OutputLocation is none or empty.')
    bucket, key = self._parse_output_location(self.output_location)
    try:
        response = retry_api_call(self._client.get_object,
                                  config=self._retry_config,
                                  logger=_logger,
                                  Bucket=bucket,
                                  Key=key)
    except Exception as e:
        _logger.exception('Failed to download csv.')
        # Surface as the DB-API error type, chaining the original cause.
        raise_from(OperationalError(*e.args), e)
    if not response['ContentLength']:
        # Empty body — allow DDL that returns no result data.
        return pd.DataFrame()
    # NOTE(review): infer_datetime_format is deprecated in pandas >= 2.0
    # (a no-op there); kept for behavior parity with older pandas.
    frame = pd.read_csv(io.BytesIO(response['Body'].read()),
                        dtype=self.dtypes,
                        converters=self.converters,
                        parse_dates=self.parse_dates,
                        infer_datetime_format=True)
    return self._trunc_date(frame)
def _cancel(self, query_id):
    """Ask Athena to stop the execution identified by *query_id*.

    Raises ``OperationalError`` (chained to the original exception) if
    the StopQueryExecution call fails after retries.
    """
    try:
        retry_api_call(self._connection.client.stop_query_execution,
                       config=self._retry_config,
                       logger=_logger,
                       QueryExecutionId=query_id)
    except Exception as e:
        _logger.exception('Failed to cancel query.')
        raise_from(OperationalError(*e.args), e)
def _get_query_execution(self, query_id):
    """Fetch current execution metadata for *query_id* from Athena.

    Returns an ``AthenaQueryExecution`` wrapping the raw API response;
    raises ``OperationalError`` when the call fails after retries.
    """
    try:
        response = retry_api_call(self._connection.client.get_query_execution,
                                  config=self._retry_config,
                                  logger=_logger,
                                  QueryExecutionId=query_id)
    except Exception as e:
        _logger.exception('Failed to get query execution.')
        raise_from(OperationalError(*e.args), e)
    else:
        return AthenaQueryExecution(response)
# NOTE(review): fragment of a dbcli-style REPL query loop — the enclosing
# try/loop and the bindings for `text`, `t`, `status`, `successful`,
# `mutating`, and `result_count` are outside this chunk; indentation has
# been stripped. Code left byte-identical; comments only.
# Echo per-statement wall-clock time when \timing is on.
if special.is_timing_enabled():
self.echo('Time: %0.03fs' % t)
except KeyboardInterrupt:
pass
start = time()
result_count += 1
# A batch is "mutating" if any statement in it mutated data.
mutating = mutating or is_mutating(status)
special.unset_once_if_written()
except EOFError as e:
# Propagate EOF so the outer loop can terminate the REPL.
raise e
except KeyboardInterrupt:
# Ctrl-C cancels the current statement but keeps the REPL alive.
pass
except NotImplementedError:
self.echo('Not Yet Implemented.', fg="yellow")
except OperationalError as e:
LOGGER.debug("Exception: %r", e)
LOGGER.error("sql: %r, error: %r", text, e)
LOGGER.error("traceback: %r", traceback.format_exc())
self.echo(str(e), err=True, fg='red')
except Exception as e:
LOGGER.error("sql: %r, error: %r", text, e)
LOGGER.error("traceback: %r", traceback.format_exc())
self.echo(str(e), err=True, fg='red')
else:
# Refresh the table names and column names if necessary.
if need_completion_refresh(text):
self.refresh_completions()
# Record the statement in the session history on success.
query = Query(text, successful, mutating)
self.query_history.append(query)