# validating configs
if KOA_CONFIG.cost_model == 'CHARGE_BACK' and KOA_CONFIG.billing_hourly_rate <= 0.0:
    KOA_LOGGER.fatal('invalid billing hourly rate for CHARGE_BACK cost allocation')
    sys.exit(1)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Kubernetes Opex Analytics Backend')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + KOA_CONFIG.version)
    args = parser.parse_args()

    # start the metrics puller and the analytics exporter as background threads
    th_puller = threading.Thread(target=create_metrics_puller)
    th_exporter = threading.Thread(target=dump_analytics)
    th_puller.start()
    th_exporter.start()

    if not KOA_CONFIG.enable_debug:
        # normal mode: serve the WSGI app through waitress
        waitress_serve(wsgi_dispatcher, listen='*:5483')
    else:
        # debug mode: fall back to the Flask development server
        app.run(host='0.0.0.0', port=5483)

@staticmethod
def waitress(app, address, **options):
    from waitress import serve
    serve(app, host=address[0], port=address[1], _quiet=True)

def run(self, host='0.0.0.0', port=8080):
    """
    Launch a development web server.
    """
    waitress.serve(self, host=host, port=port)

@app.route("/", defaults={"path": ""}, methods=["POST", "GET"])
@app.route("/", methods=["POST", "GET"])
def main_route(path):
    raw_body = os.getenv("RAW_BODY", "false")

    as_text = True
    if is_true(raw_body):
        as_text = False

    # hand the request body (text or raw bytes) to the function handler
    ret = handler.handle(request.get_data(as_text=as_text))
    return ret

if __name__ == '__main__':
    serve(app, host='0.0.0.0', port=5000)

def launchWebEvolutionaryInfo():
    print("WEBSERVER MODE")
    webpageTitle = "japonicus evolutionary statistics - v%.2f" % VERSION
    webApp, webServer = promoterz.webServer.core.build_server(webpageTitle)
    # run waitress in a background thread so the caller is not blocked
    webServerProcess = Thread(
        target=waitress.serve,
        kwargs={
            "app": webServer,
            "listen": "0.0.0.0:8182"
        }
    )
    webServerProcess.start()
    return webApp

else:
    setloglevel(sio_logger, waptconfig.loglevel)

if waptrepositories_api is not None:
    waptrepositories_api.sio = sio

if options.devel:
    #socketio_server.run(app,host='127.0.0.1', port=8088)
    logger.info('Starting local dev waptservice...')
    app.run(host='127.0.0.1', port=8088, debug=False)
else:
    #wsgi.server(eventlet.listen(('', 8088)), app)
    port_config = []
    if waptconfig.waptservice_port:
        # configure the waitress logger before serve(), which blocks until shutdown
        waitress_logger = logging.getLogger('waitress')
        if options.loglevel:
            setloglevel(waitress_logger, options.loglevel)
        else:
            setloglevel(waitress_logger, waptconfig.loglevel)
        server = serve(app, host='127.0.0.1', port=waptconfig.waptservice_port)

cx_logger().info("TensorFlow model signature: {}".format(local_cache["client"].input_signature))
waitress_kwargs = {}
if api["predictor"].get("config") is not None:
for key, value in api["predictor"]["config"].items():
if key.startswith("waitress_"):
waitress_kwargs[key[len("waitress_") :]] = value
if len(waitress_kwargs) > 0:
cx_logger().info("waitress parameters: {}".format(waitress_kwargs))
waitress_kwargs["listen"] = "*:{}".format(args.port)
cx_logger().info("{} api is live".format(api["name"]))
open("/health_check.txt", "a").close()
serve(app, **waitress_kwargs)
if __name__ == '__main__':
    authn_policy = DumbAuthenticationPolicy()
    authz_policy = DumbAuthorizationPolicy()
    config = Configurator(
        authentication_policy=authn_policy,
        authorization_policy=authz_policy
    )
    config.add_route('blogentry_show', '/blog/{id}')
    config.add_route('blogentry_delete', '/blog/{id}/delete')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.scan()
    app = config.make_wsgi_app()
    serve(app, threads=1)

Python-based waitress server on the provided host and
port as requested.

For more information on the waitress http server, please
refer to https://pypi.python.org/pypi/waitress.

:type host: String
:param host: The host name or ip address to bind the server
    to, this value should be represented as a string.
:type port: int
:param port: The tcp port for the bind operation of the
    server (listening operation).
"""

import waitress
waitress.serve(self.application, host=host, port=port)
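
A minimal, self-contained sketch of the same waitress.serve(app, host=..., port=...) call described by the docstring above, shown against a bare WSGI callable; the hello_app function and the 127.0.0.1:8080 bind address are illustrative placeholders, not taken from any of the projects quoted above.

# illustrative sketch only: hello_app and the bind address are placeholders
import waitress

def hello_app(environ, start_response):
    # simplest possible WSGI application: always answer 200 with a short text body
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from waitress\n']

if __name__ == '__main__':
    # waitress.serve() blocks until the process is stopped
    waitress.serve(hello_app, host='127.0.0.1', port=8080)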