if server_span_observer:

    class TestBaseplateObserver(BaseplateObserver):
        def on_server_span_created(self, context, server_span):
            server_span.register(server_span_observer)

    baseplate.register(TestBaseplateObserver())

# set up the server's processor
logger = mock.Mock(spec=logging.Logger)
edge_context_factory = make_edge_context_factory()
processor = TestService.Processor(handler)
processor = baseplateify_processor(processor, logger, baseplate, edge_context_factory)

# bind a server socket on an available port
server_bind_endpoint = config.Endpoint("127.0.0.1:0")
listener = make_listener(server_bind_endpoint)
server = make_server({"max_concurrency": "100"}, listener, processor)

# figure out what port the server ended up on
server_address = listener.getsockname()
server.endpoint = config.Endpoint(f"{server_address[0]}:{server_address[1]}")

# run the server until our caller is done with it
server_greenlet = gevent.spawn(server.serve_forever)
try:
    yield server
finally:
    server_greenlet.kill()
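The bind-to-port-0 trick above is what lets the test grab an ephemeral port: binding to port 0 asks the OS for any free port, and getsockname() reports which one was assigned. A minimal self-contained sketch of the same idea with a plain socket, independent of baseplate:

import socket

# Port 0 means "any free port"; getsockname() reveals the OS's choice.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
host, port = sock.getsockname()
print(f"ephemeral port assigned: {port}")
sock.close()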
import os
import socket
import unittest

from baseplate.lib.config import Endpoint


def get_endpoint_or_skip_container(name, default_port):
    """Find a test server of the given type or raise SkipTest.

    This is useful for running tests in environments where we can't launch
    servers.

    If an environment variable like BASEPLATE_MEMCACHED_ADDR is present, that
    will override the default of localhost:{default_port}.
    """
    address = os.environ.get("BASEPLATE_%s_ADDR" % name.upper(), "localhost:%d" % default_port)
    endpoint = Endpoint(address)

    try:
        sock = socket.socket(endpoint.family, socket.SOCK_STREAM)
        sock.settimeout(0.1)
        sock.connect(endpoint.address)
    except socket.error:
        raise unittest.SkipTest("could not find %s server for integration tests" % name)
    else:
        sock.close()

    return endpoint
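A typical call site for this helper, assuming memcached on its standard port (the service name and port here are illustrative):

# Skips the calling test unless a memcached server is reachable, either at
# localhost:11211 or at the address in $BASEPLATE_MEMCACHED_ADDR.
memcached_endpoint = get_endpoint_or_skip_container("memcached", 11211)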
def test_endpoint_inet_invalid(self):
    with self.assertRaises(ValueError):
        config.Endpoint("localhost")
:param app_config: the raw application configuration
:param prefix: prefix for configuration keys
:param serializer: function to serialize values to strings suitable
    for being stored in memcached. An example is
    :py:func:`~baseplate.clients.memcache.lib.make_dump_and_compress_fn`.
:param deserializer: function to convert strings returned from
    memcached to arbitrary objects; must be compatible with ``serializer``.
    An example is :py:func:`~baseplate.clients.memcache.lib.decompress_and_load`.
:returns: :py:class:`pymemcache.client.base.PooledClient`

"""
assert prefix.endswith(".")
parser = config.SpecParser(
    {
        "endpoint": config.Endpoint,
        "max_pool_size": config.Optional(config.Integer, default=None),
        "connect_timeout": config.Optional(config.TimespanWithLegacyFallback, default=None),
        "timeout": config.Optional(config.TimespanWithLegacyFallback, default=None),
        "no_delay": config.Optional(config.Boolean, default=True),
    }
)
options = parser.parse(prefix[:-1], app_config)

return PooledClient(
    server=options.endpoint.address,
    connect_timeout=options.connect_timeout and options.connect_timeout.total_seconds(),
    timeout=options.timeout and options.timeout.total_seconds(),
    serializer=serializer,
    deserializer=deserializer,
    no_delay=options.no_delay,
    max_pool_size=options.max_pool_size,
)
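Given the spec above, an application config exercising every key might look like the following; the values and the pool_from_config name are illustrative assumptions based on the docstring's context:

app_config = {
    "memcache.endpoint": "localhost:11211",
    "memcache.max_pool_size": "25",
    "memcache.connect_timeout": "200 milliseconds",
    "memcache.timeout": "1 second",
    "memcache.no_delay": "true",
}
client = pool_from_config(app_config, prefix="memcache.")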
args = arg_parser.parse_args()

if args.debug:
    level = logging.DEBUG
else:
    level = logging.WARNING
logging.basicConfig(level=level)

config_parser = configparser.RawConfigParser()
config_parser.read_file(args.config_file)
publisher_raw_cfg = dict(config_parser.items("trace-publisher:" + args.queue_name))
publisher_cfg = config.parse_config(
    publisher_raw_cfg,
    {
        "zipkin_api_url": config.Endpoint,
        "post_timeout": config.Optional(config.Integer, POST_TIMEOUT_DEFAULT),
        "max_batch_size": config.Optional(config.Integer, MAX_BATCH_SIZE_DEFAULT),
        "retry_limit": config.Optional(config.Integer, RETRY_LIMIT_DEFAULT),
    },
)

trace_queue = MessageQueue(
    "/traces-" + args.queue_name, max_messages=MAX_QUEUE_SIZE, max_message_size=MAX_SPAN_SIZE
)

# pylint: disable=maybe-no-member
inner_batch = TraceBatch(max_size=publisher_cfg.max_batch_size)
batcher = TimeLimitedBatch(inner_batch, MAX_BATCH_AGE)
metrics_client = metrics_client_from_config(publisher_raw_cfg)
publisher = ZipkinPublisher(
    publisher_cfg.zipkin_api_url.address,
    # the original snippet is cut off mid-call; the remaining arguments
    # here are a plausible reconstruction, not taken from the source
    metrics_client,
    post_timeout=publisher_cfg.post_timeout,
)
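The publisher reads its settings from an INI section named after the queue ("trace-publisher:" + args.queue_name). A runnable sketch of a matching config section, assuming a queue named "main" (all values illustrative):

import configparser

config_text = """
[trace-publisher:main]
zipkin_api_url = localhost:9411
post_timeout = 3
max_batch_size = 500
retry_limit = 10
"""
config_parser = configparser.RawConfigParser()
config_parser.read_string(config_text)
print(dict(config_parser.items("trace-publisher:main")))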
def _parse(watched_file: IO) -> _Inventory:
    backends = []
    for d in json.load(watched_file):
        endpoint = Endpoint("%s:%d" % (d["host"], d["port"]))
        weight = d["weight"] if d["weight"] is not None else 1
        backend = Backend(d["id"], d["name"], endpoint, weight)
        backends.append(backend)

    lottery = None
    if backends:
        lottery = WeightedLottery(backends, weight_key=lambda b: b.weight)

    return _Inventory(backends, lottery)
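An input file for _parse is a JSON array of backend records; a sketch of one the loop above accepts, where a null weight falls back to 1 (field values illustrative):

import io

inventory_json = io.StringIO(
    '[{"id": 1, "name": "service-a", "host": "10.0.0.1", "port": 9090, "weight": 2},'
    ' {"id": 2, "name": "service-a", "host": "10.0.0.2", "port": 9090, "weight": null}]'
)
inventory = _parse(inventory_json)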
    replaced with a new one. Written as a
    :py:func:`~baseplate.lib.config.Timespan` e.g. ``1 minute``.
* ``timeout``: The maximum amount of time a connection attempt or RPC call
  can take before a TimeoutError is raised
  (:py:func:`~baseplate.lib.config.Timespan`).
* ``max_connection_attempts``: The maximum number of times the pool will
  attempt to open a connection.

.. versionchanged:: 1.2
    ``max_retries`` was renamed ``max_connection_attempts``.

"""
assert prefix.endswith(".")
parser = config.SpecParser(
    {
        "endpoint": config.Endpoint,
        "size": config.Optional(config.Integer, default=10),
        "max_age": config.Optional(config.Timespan, default=config.Timespan("1 minute")),
        "timeout": config.Optional(config.Timespan, default=config.Timespan("1 second")),
        "max_connection_attempts": config.Optional(config.Integer),
        "max_retries": config.Optional(config.Integer),
    }
)
options = parser.parse(prefix[:-1], app_config)

if options.size is not None:
    kwargs.setdefault("size", options.size)
if options.max_age is not None:
    kwargs.setdefault("max_age", options.max_age.total_seconds())
if options.timeout is not None:
    kwargs.setdefault("timeout", options.timeout.total_seconds())
if options.max_connection_attempts is not None:
    kwargs.setdefault("max_connection_attempts", options.max_connection_attempts)
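A sketch of application configuration matching this spec, assuming a client prefixed "example_service." (prefix and values illustrative):

app_config = {
    "example_service.endpoint": "localhost:9090",
    "example_service.size": "10",
    "example_service.max_age": "1 minute",
    "example_service.timeout": "1 second",
    "example_service.max_connection_attempts": "3",
}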
``tracing.sample_rate`` (optional)
    Percentage of unsampled requests to record traces for (e.g. "37%").

:param raw_config: The application configuration, which should have settings
    for the tracing client.
:param log_if_unconfigured: When the client is not configured, should trace
    spans be logged or discarded silently?
:return: A configured client.

"""
cfg = config.parse_config(
    raw_config,
    {
        "tracing": {
            "service_name": config.String,
            "endpoint": config.Optional(config.Endpoint),
            "queue_name": config.Optional(config.String),
            "max_span_queue_size": config.Optional(config.Integer, default=50000),
            "num_span_workers": config.Optional(config.Integer, default=5),
            "span_batch_interval": config.Optional(
                config.Timespan, default=config.Timespan("500 milliseconds")
            ),
            "num_conns": config.Optional(config.Integer, default=100),
            "sample_rate": config.Optional(
                config.Fallback(config.Percent, config.Float), default=0.1
            ),
        }
    },
)

# pylint: disable=maybe-no-member
return make_client(
    # the original snippet ends mid-call; forwarding the parsed options
    # like this is a plausible reconstruction, not taken from the source
    cfg.tracing.service_name,
    tracing_endpoint=cfg.tracing.endpoint,
    tracing_queue_name=cfg.tracing.queue_name,
    max_span_queue_size=cfg.tracing.max_span_queue_size,
    num_span_workers=cfg.tracing.num_span_workers,
    span_batch_interval=cfg.tracing.span_batch_interval.total_seconds(),
    num_conns=cfg.tracing.num_conns,
    sample_rate=cfg.tracing.sample_rate,
    log_if_unconfigured=log_if_unconfigured,
)
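A sketch of a corresponding call, assuming this is baseplate's tracing_client_from_config; only tracing.service_name is required, and the other keys shown override defaults (values illustrative):

client = tracing_client_from_config(
    {
        "tracing.service_name": "my_service",
        "tracing.endpoint": "localhost:9410",
        "tracing.sample_rate": "10%",  # parsed by config.Percent
    }
)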