async def _init_db(self):
    self._connection = await asyncpg.connect(self.dsn)
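The snippet above keeps a single connection on the instance. Where several coroutines need database access concurrently, asyncpg also provides connection pools. A minimal sketch, with a placeholder DSN (not part of the snippet above):

import asyncio
import asyncpg

async def main():
    # Placeholder DSN; adjust to the target database.
    pool = await asyncpg.create_pool(dsn="postgresql://localhost/postgres",
                                     min_size=1, max_size=5)
    try:
        async with pool.acquire() as conn:
            # Each acquired connection is returned to the pool on exit.
            print(await conn.fetchval("SELECT 1"))
    finally:
        await pool.close()

asyncio.run(main())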
async def delete_method_data():
    conn = await asyncpg.connect(database=__DB_NAME)
    await conn.execute('''DELETE FROM foglamp.tasks WHERE process_name IN ('testsleep30', 'echo_test')''')
    await conn.execute('''DELETE FROM foglamp.schedules WHERE process_name IN ('testsleep30', 'echo_test')''')
    await conn.execute('''COMMIT''')
    await conn.close()
    await asyncio.sleep(14)
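asyncpg runs each execute() in its own implicit transaction unless one is opened explicitly, so the bare COMMIT above is redundant. A minimal sketch, assuming the same foglamp tables, that groups both deletes into one explicit transaction instead:

import asyncpg

async def delete_method_data_tx(dsn):
    conn = await asyncpg.connect(dsn=dsn)
    try:
        # Both deletes commit together when the transaction block exits cleanly.
        async with conn.transaction():
            await conn.execute(
                "DELETE FROM foglamp.tasks WHERE process_name IN ('testsleep30', 'echo_test')")
            await conn.execute(
                "DELETE FROM foglamp.schedules WHERE process_name IN ('testsleep30', 'echo_test')")
    finally:
        await conn.close()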
async def create_database_connection():
    connection = await asyncpg.connect(
        user="harmonbot",
        password=os.getenv("DATABASE_PASSWORD"),
        database="harmonbot_beta" if BETA else "harmonbot",
        host=os.getenv("POSTGRES_HOST") or "localhost"
    )
    await initialize_database_connection(connection)
    try:
        yield connection
    finally:
        await connection.close()
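The yield suggests this helper is consumed as an async context manager or fixture. A minimal usage sketch, assuming a contextlib.asynccontextmanager wrapper (not shown in the snippet) and a placeholder DSN in place of the environment-based credentials:

from contextlib import asynccontextmanager

import asyncpg

@asynccontextmanager
async def database_connection():
    # Placeholder DSN; the original reads credentials from the environment.
    connection = await asyncpg.connect(dsn="postgresql://localhost/harmonbot")
    try:
        yield connection
    finally:
        await connection.close()

async def example():
    async with database_connection() as connection:
        print(await connection.fetchval("SELECT version()"))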
async def new_connection(self):
    con_args = self._connect_args.copy()
    con_args['database'] = self._dbname
    try:
        return await asyncpg.connect(**con_args)
    except asyncpg.InvalidCatalogNameError as ex:
        raise errors.AuthenticationError(str(ex)) from ex
    except Exception as ex:
        raise errors.InternalServerError(str(ex)) from ex
def _test_connection(self, timeout=60):
    self._connection_addr = None
    loop = asyncio.new_event_loop()
    try:
        for i in range(timeout):
            if self._connection_addr is None:
                conn_spec = self._get_connection_spec()
                if conn_spec is None:
                    time.sleep(1)
                    continue
            try:
                con = loop.run_until_complete(
                    asyncpg.connect(database='postgres',
                                    user='postgres',
                                    timeout=5, loop=loop,
                                    **self._connection_addr))
            except (OSError, asyncio.TimeoutError,
                    asyncpg.CannotConnectNowError,
                    asyncpg.PostgresConnectionError):
                time.sleep(1)
                continue
            except asyncpg.PostgresError:
                # Any error other than a not-ready or connection error is
                # interpreted to indicate the server is up.
                break
            else:
                loop.run_until_complete(con.close())
                break
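The snippet above polls synchronously with a private event loop, which suits server start-up code. A minimal sketch, not the original _test_connection, of the same retry idea written as a plain coroutine:

import asyncio
import asyncpg

async def wait_for_postgres(timeout=60, **connect_kwargs):
    for _ in range(timeout):
        try:
            conn = await asyncpg.connect(timeout=5, **connect_kwargs)
        except (OSError, asyncio.TimeoutError,
                asyncpg.CannotConnectNowError,
                asyncpg.PostgresConnectionError):
            await asyncio.sleep(1)  # server not accepting connections yet; retry
        else:
            await conn.close()
            return True
    return False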
async def _connect(self):
    if self.conn is None:
        self.conn = await asyncpg.connect(user=self.user, password=self.pw, database=self.db, host=self.host)
def __init__(self):
    import asyncpg
    self.connect = asyncpg.connect
    # Tuple of error classes so callers can catch both families in one except clause.
    self.Error = (asyncpg.PostgresError, asyncpg.InterfaceError)
    self.connection_error_cls = asyncpg.PostgresConnectionError
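A hypothetical usage sketch for the shim above (the class name and calling code are not shown in the snippet), illustrating how the stored error tuple is used:

async def run_simple_query(driver, dsn):
    # `driver` is an instance of the shim class above.
    try:
        conn = await driver.connect(dsn=dsn)
    except driver.Error as exc:  # catches both PostgresError and InterfaceError
        print("database error:", exc)
        return None
    try:
        return await conn.fetch("SELECT 1")
    finally:
        await conn.close()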
async def read_audit_entries(limit=None, offset=None, source=None, severity=None):
    """
    Args:
        limit: return at most this many audit entries
        offset: skip the first n entries in the audit table; used with limit to implement paged interfaces
        source: filter the audit entries to only those from the specified source
        severity: filter the audit entries to only those of the specified severity

    Returns:
        list of audit trail entries sorted with most recent first
    """
    conn = await asyncpg.connect(**__CONNECTION)
    _limit_clause = " LIMIT {0}".format(limit) if limit else " "
    _offset_clause = " "
    if limit:
        _offset_clause = " OFFSET {0}".format(offset) if offset else " "
    # HACK: "WHERE 1=1" lets the optional filters be appended as plain "AND ..."
    # clauses, so adding more filters later does not cause an explosion of if statements.
    _where_clause = " WHERE 1=1 "
    if source:
        _where_clause += "AND code='{0}' ".format(source)
    if severity:
        _where_clause += "AND level={0} ".format(_Severity[severity].value)
    # Select the code, ts, level, log columns from the log table
    query = """
        SELECT code AS source, (ts)::varchar AS timestamp, level AS severity, log AS details
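The snippet above builds its WHERE clause by formatting values directly into the SQL string. asyncpg also accepts $n placeholders with values passed to fetch(), which avoids quoting issues. A minimal sketch, assuming the table is named log as the comment above suggests:

async def read_audit_entries_for_source(conn, source, limit=20):
    query = """
        SELECT code AS source, (ts)::varchar AS timestamp,
               level AS severity, log AS details
          FROM log
         WHERE code = $1
         ORDER BY ts DESC
         LIMIT $2
    """
    # source and limit are bound server-side rather than interpolated into the SQL.
    return await conn.fetch(query, source, limit)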
async def get_connection(self, name: str = None) -> asyncpg.connection.Connection:
    connection_options = _get_connection_options(self.config)
    dsn = self.get_dsn(name)
    return await asyncpg.connect(dsn=dsn, **connection_options)
async def read_statistics_history(limit=None):
    """Fetch statistics from the statistics_history table, optionally limited to 'limit' rows"""
    conn = await asyncpg.connect(**__CONNECTION)
    _limit_clause = " LIMIT $1" if limit else " "
    query = """
        SELECT date_trunc('second', history_ts::timestamptz)::varchar AS history_ts,
               key,
               value
          FROM statistics_history
         WHERE date_trunc('second', history_ts::timestamptz)::varchar IN
               (SELECT DISTINCT date_trunc('second', history_ts::timestamptz)::varchar AS history_ts
                  FROM statistics_history ORDER BY history_ts DESC {limit_clause})
         ORDER BY history_ts, key;
    """.format(limit_clause=_limit_clause)
    stmt = await conn.prepare(query)
    rows = await stmt.fetch(limit) if limit else await stmt.fetch()
    columns = ('history_ts', 'key', 'value')
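A minimal standalone sketch of the same prepare/fetch pattern, using a placeholder query against the same table; it relies on the fact that asyncpg rows are Record objects and convert directly to dicts:

async def fetch_history(conn, limit=None):
    query = ("SELECT key, value FROM statistics_history ORDER BY history_ts DESC"
             + (" LIMIT $1" if limit else ""))
    stmt = await conn.prepare(query)
    rows = await stmt.fetch(limit) if limit else await stmt.fetch()
    # Each Record maps column names to values, so dict() gives plain dictionaries.
    return [dict(row) for row in rows]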