Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
sql.Identifier('pg_catalog', 'pg_database')
),
# Literal
sql.SQL('''SELECT {}''').format(sql.Literal('foobar')),
# Placeholder
sql.SQL('''SELECT {}''').format(sql.Placeholder())
], ids=('str', 'bytes', 'unicode', 'Composed',
'Identifier', 'Literal', 'Placeholder'))
def test_execute_sql(tracer, engine, connection, method, query):
    """Smoke-test executing ``query`` through a traced cursor.

    NOTE(review): the visible body stops after creating ``cur`` and
    ``params`` -- the call that actually uses ``method``/``query``
    appears to lie outside this chunk; confirm against the full file.
    """
    # Check that executing with objects of ``sql.Composable`` subtypes doesn't
    # raise any exceptions.
    metadata.create_all(engine)  # make sure the schema exists before executing
    with tracer.start_active_span('test'):
        cur = connection.cursor()
        params = ('foobar', )  # bound parameter for the Placeholder variant
def cleanup_postgresql(connection, truncate_tables):
    """Best-effort test cleanup: TRUNCATE every table in ``truncate_tables``.

    Tables that do not exist (pgcode ``UNDEFINED_TABLE``) are reported and
    skipped; any other ``ProgrammingError`` is re-raised.

    - ``connection`` -- an open psycopg2 connection.
    - ``truncate_tables`` -- iterable of table names to truncate.
    """
    # Fix: the trace message previously said 'cleanup_postgreql' (typo).
    print('cleanup_postgresql({!r}, {!r})'.format(connection, truncate_tables))
    for table in truncate_tables:
        with connection.cursor() as cursor:
            try:
                # Identifier() safely quotes the table name (no SQL injection).
                cursor.execute(pgsql.SQL('TRUNCATE TABLE {}').format(
                    pgsql.Identifier(table)))
            except psycopg2.ProgrammingError as exc:
                if exc.pgcode != psycopg2.errorcodes.UNDEFINED_TABLE:
                    raise
                # Missing table: report and continue with the next one.
                print("Error truncating {!r} table: {}".format(table, exc))
- ``tables`` -- a list of tables to rename (e.g. self.search_table, self.extra_table, self.stats.counts, self.stats.stats)
- ``indexed`` -- boolean, whether the temporary table has indexes on it.
"""
now = time.time()
backup_number = 1
for table in tables:
while self._table_exists("{0}_old{1}".format(table, backup_number)):
backup_number += 1
rename_table = SQL("ALTER TABLE {0} RENAME TO {1}")
rename_pkey = SQL("ALTER TABLE {0} RENAME CONSTRAINT {1} TO {2}")
rename_index = SQL("ALTER INDEX {0} RENAME TO {1}")
for table in tables:
self._execute(rename_table.format(Identifier(table), Identifier("{0}_old{1}".format(table, backup_number))), silent=True, commit=False)
self._execute(rename_table.format(Identifier(table + "_tmp"), Identifier(table)), silent=True, commit=False)
self._execute(rename_pkey.format(Identifier("{0}_old{1}".format(table, backup_number)),
Identifier("{0}_pkey".format(table)),
Identifier("{0}_old{1}_pkey".format(table, backup_number))),
silent=True, commit=False)
self._execute(rename_pkey.format(Identifier(table),
Identifier("{0}_tmp_pkey".format(table)),
Identifier("{0}_pkey".format(table))),
silent=True, commit=False)
selecter = SQL("SELECT index_name FROM meta_indexes WHERE table_name = %s")
cur = self._execute(selecter, [self.search_table], silent=True, commit=False)
for res in cur:
self._execute(rename_index.format(Identifier(res[0]), Identifier("{0}_old{1}".format(res[0], backup_number))), silent=True, commit=False)
if indexed:
self._execute(rename_index.format(Identifier(res[0] + "_tmp"), Identifier(res[0])), silent=True, commit=False)
print "Swapped temporary tables for %s into place in %s secs\nNew backup at %s"%(self.search_table, time.time()-now, "{0}_old{1}".format(self.search_table, backup_number))
self.conn.commit()
if sort is None:
has_sort = True
if self._sort is None:
if limit is not None and not (limit == 1 and offset == 0):
sort = Identifier("id")
raw = ["id"]
else:
has_sort = False
raw = []
elif self._primary_sort in query or self._out_of_order:
# We use the actual sort because the postgres query planner doesn't know that
# the primary key is connected to the id.
sort = self._sort
raw = self._sort_orig
else:
sort = Identifier("id")
raw = ["id"]
return sort, has_sort, raw
else:
return self._sort_str(sort), bool(sort), sort
def _identify_tables(self, search_table, extra_table):
    """
    Utility function for normalizing input on ``resort``.

    Returns ``(search_table, extra_table)`` wrapped as ``Identifier``s,
    defaulting to this table's own names; raises ``ValueError`` if an
    extra table is requested but this table has none.
    """
    # Fall back to this table's own search table when none was given.
    search_table = Identifier(
        self.search_table if search_table is None else search_table)
    if extra_table is None:
        # Default to our extra table if we have one; otherwise stay None.
        if self.extra_table is not None:
            extra_table = Identifier(self.extra_table)
    else:
        if self.extra_table is None:
            raise ValueError("No extra table")
        extra_table = Identifier(extra_table)
    return search_table, extra_table
def _identify_tables(self, search_table, extra_table):
    """
    Utility function for normalizing input on ``resort``.

    Normalizes both table names to ``Identifier`` objects, substituting
    this table's own names for ``None`` arguments.

    NOTE(review): identical to the definition earlier in this chunk --
    presumably these come from two distinct classes; confirm in the
    full file.
    """
    if search_table is None:
        search_table = self.search_table
    search_table = Identifier(search_table)
    if extra_table is not None:
        # An explicit extra table only makes sense if we actually have one.
        if self.extra_table is None:
            raise ValueError("No extra table")
        extra_table = Identifier(extra_table)
    elif self.extra_table is not None:
        extra_table = Identifier(self.extra_table)
    return search_table, extra_table
INPUT:
- ``query`` -- a mongo-style dictionary, as in the ``search`` method.
- ``split_list`` -- see the ``add_stats`` method.
- ``record`` -- boolean (default False). Whether to store the result in the count table.
- ``suffix`` -- if provided, the table with that suffix added will be
used to perform the count
- ``extra`` -- used if the result is recorded (see discussion at the top of this class).
OUTPUT:
The number of rows in the search table satisfying the query.
"""
if split_list:
raise NotImplementedError
selecter = SQL("SELECT COUNT(*) FROM {0}").format(Identifier(self.search_table + suffix))
qstr, values = self.table._parse_dict(query)
if qstr is not None:
selecter = SQL("{0} WHERE {1}").format(selecter, qstr)
cur = self._execute(selecter, values)
nres = cur.fetchone()[0]
if record:
self._record_count(query, nres, split_list, suffix, extra)
return nres
).format(
schema=sql.Identifier(schema),
name=sql.Identifier(name),
col=sql.Identifier(col['name'])
)
)
logs.append("add " + col['name'])
changed = True
if diff['primary_key'] is not True:
changed = diff['primary_key'] is None
cursor.execute(
sql.SQL("ALTER TABLE {schema}.{name} DROP CONSTRAINT IF EXISTS {pkname}").format(
schema=sql.Identifier(schema),
name=sql.Identifier(name),
pkname=sql.Identifier(name + "_pkey")
)
)
if len(primary_key) > 0:
changed = True
_pk = map(lambda c: sql.Identifier(c), primary_key)
cursor.execute(
sql.SQL("ALTER TABLE {schema}.{name} ADD PRIMARY KEY ({pkey})").format(
schema=sql.Identifier(schema),
name=sql.Identifier(name),
pkey=sql.SQL(', ').join(_pk)
)
)
logs.append("add primary key")
cursor.execute(