# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def storage(line, cell=None):
    """Implements the storage cell magic for ipython notebooks.

    Args:
      line: the contents of the storage line.
      cell: the cell body (accepted per the cell-magic protocol; not read here).

    Returns:
      The results of executing the cell.
    """
    # Top-level dispatcher for the %storage magic; each subcommand added below
    # registers its own flags and handler.
    parser = datalab.utils.commands.CommandParser(prog='storage', description="""
Execute various storage-related operations. Use "%storage -h"
for help on a specific command.
""")
    # TODO(gram): consider adding a move command too. I did try this already using the
    # objects.patch API to change the object name but that fails with an error:
    #
    # Value 'newname' in content does not agree with value 'oldname'. This can happen when a value
    # set through a parameter is inconsistent with a value set in the request.
    #
    # This is despite 'name' being identified as writable in the storage API docs.
    # The alternative would be to use a copy/delete.
    copy_parser = parser.subcommand('copy',
                                    'Copy one or more GCS objects to a different location.')
    # -s/--source takes one or more object names (nargs='+').
    copy_parser.add_argument('-s', '--source', help='The name of the object(s) to copy', nargs='+')
    # NOTE(review): the source fragment is truncated here — the call below is
    # incomplete in the visible snippet and continues outside this view.
    copy_parser.add_argument('-d', '--destination', required=True,
def monitoring(line, cell=None):
    """Cell magic entry point for %monitoring in IPython notebooks.

    Args:
      line: the contents of the storage line.

    Returns:
      The results of executing the cell.
    """
    # Command dispatcher for the magic; subcommands register themselves below.
    parser = datalab.utils.commands.CommandParser(
        prog='monitoring',
        description=('Execute various Monitoring-related operations. Use "%monitoring '
                     ' -h" for help on a specific command.'))

    # %monitoring list ...
    list_cmd = parser.subcommand(
        'list', 'List the metrics or resource types in a monitored project.')

    # %monitoring list metrics [-t TYPE] [-p PROJECT]
    metrics_cmd = list_cmd.subcommand(
        'metrics',
        'List the metrics that are available through the Monitoring API.')
    metrics_cmd.add_argument(
        '-t', '--type',
        help='The type of metric(s) to list; can include wildchars.')
    metrics_cmd.add_argument(
        '-p', '--project', help='The project on which to execute the request.')
    metrics_cmd.set_defaults(func=_list_metric_descriptors)
def ml(line, cell=None):
    """Implements the ml line cell magic.

    Args:
      line: the contents of the ml line.
      cell: the contents of the ml cell.

    Returns:
      The results of executing the cell.
    """
    # Dispatcher for the %%ml magic; subcommands register their flags below.
    parser = datalab.utils.commands.CommandParser(prog="ml", description="""
Execute various ml-related operations. Use "%%ml -h" for help on a specific command.
""")
    # 'preprocess': runs a preprocessing job, locally by default or in the
    # cloud with --cloud, using a required user-supplied preprocess package.
    preprocess_parser = parser.subcommand('preprocess', 'Run a preprocess job.')
    preprocess_parser.add_argument('--usage',
                                   help='Show usage from the specified preprocess package.',
                                   action='store_true', default=False)
    preprocess_parser.add_argument('--cloud',
                                   help='Whether to run the preprocessing job in the cloud.',
                                   action='store_true', default=False)
    preprocess_parser.add_argument('--package',
                                   help='The preprocess package to use. Can be a gs or local path.',
                                   required=True)
    preprocess_parser.set_defaults(func=_preprocess)
    # 'train': trains an ML model.
    train_parser = parser.subcommand('train', 'Train an ML model.')
    # NOTE(review): the source fragment is truncated here — the call below is
    # incomplete in the visible snippet and continues outside this view.
    train_parser.add_argument('--usage',
def _create_bigquery_parser():
    """Create the parser for the %bigquery magics.

    Note that because we use the func default handler dispatch mechanism of
    argparse, our handlers can take only one argument, which is the parsed
    args. So we must create closures for the handlers that bind the cell
    contents, and thus must recreate this parser for each cell upon execution.
    """
    parser = datalab.utils.commands.CommandParser(prog='bigquery', description="""
Execute various BigQuery-related operations. Use "%bigquery -h"
for help on a specific command.
""")
    # This is a bit kludgy because we want to handle some line magics and some
    # cell magics with the bigquery command. Each entry pairs a subparser
    # factory with the handler it dispatches to.
    registrations = (
        (_create_sample_subparser, _sample_cell),   # %%bigquery sample
        (_create_create_subparser, _create_cell),   # %%bigquery create
        (_create_delete_subparser, _delete_cell),   # %%bigquery delete
    )
    for make_subparser, handler in registrations:
        _add_command(parser, make_subparser, handler)
def tensorboard(line, cell=None):
    """Cell magic entry point for %tensorboard.

    Args:
      line: the contents of the tensorboard line.

    Returns:
      The results of executing the cell.
    """
    parser = datalab.utils.commands.CommandParser(prog='tensorboard', description="""
Execute tensorboard operations. Use "%tensorboard -h" for help on a specific command.
""")
    # 'list' takes no flags of its own; it just enumerates running instances.
    parser.subcommand('list', 'List running TensorBoard instances.').set_defaults(func=_list)

    # 'start' launches a server pointed at a (required) event directory.
    start_cmd = parser.subcommand('start', 'Start a TensorBoard server with the given logdir.')
    start_cmd.add_argument('--logdir',
                           required=True,
                           help='The directory containing TensorFlow events. Can be a GCS or local path.')
    start_cmd.set_defaults(func=_start)

    # 'stop' shuts down a server identified by its (required) pid.
    stop_cmd = parser.subcommand('stop', 'Stop a TensorBoard server with the given pid.')
    stop_cmd.add_argument('--pid',
                          required=True,
                          help='The pid of the TensorBoard instance to stop.')
    stop_cmd.set_defaults(func=_stop)

    namespace = datalab.utils.commands.notebook_environment()
# NOTE(review): this function is truncated in the visible snippet — the
# triple-quoted description string below is unterminated and the body
# continues outside this view. Annotated only; code left byte-identical.
def _create_sql_parser():
    """Create the argument parser for the %%sql cell magic."""
    sql_parser = CommandParser(prog="%%sql",
                               formatter_class=argparse.RawDescriptionHelpFormatter,
                               description="""
Create a named SQL module with one or more queries.
The cell body should contain an optional initial part defining the default
values for the variables, if any, using Python code, followed by one or more
queries.
Queries should start with 'DEFINE QUERY ' in order to bind them to
. in the notebook (as datalab.data.SqlStament instances).
The final query can optionally omit 'DEFINE QUERY ', as using the module
name in places where a SqlStatement is expected will resolve to the final query
in the module.
Queries can refer to variables with '$', as well as refer to other queries
within the same module, making it easy to compose nested queries and test their