# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def export_tfjs_model(yolo, path):
    """Export the Keras model held by *yolo* to TensorFlow.js graph-model format.

    The model is first exported to an intermediate SavedModel in a secure
    temporary directory, then converted with the TF.js v2 converter. The
    intermediate directory is always removed, even on failure.

    Args:
        yolo: Object exposing a ``yolo_model`` attribute (a tf.keras model —
            TODO confirm against caller).
        path: Destination directory for the TF.js artifacts; handed to the
            project helper ``overwrite_path`` first.
    """
    import os
    import shutil
    import tempfile

    import tensorflowjs as tfjs

    overwrite_path(path)  # project helper; presumably prepares/clears *path*
    # mkdtemp() actually creates a private directory (unlike the deprecated,
    # race-prone mktemp(), which only invented a name). The SavedModel itself
    # goes in a fresh subdirectory so the export sees a nonexistent path.
    temp_parent_dir = tempfile.mkdtemp()
    temp_savedmodel_dir = os.path.join(temp_parent_dir, 'model.savedmodel')
    try:
        tf.keras.experimental.export_saved_model(
            yolo.yolo_model, temp_savedmodel_dir, serving_only=True)
        tfjs.converters.tf_saved_model_conversion_v2.convert_tf_saved_model(
            temp_savedmodel_dir,
            path,
            signature_def='serving_default',
            saved_model_tags='serve')
    finally:
        # Clean up the intermediate SavedModel; best-effort on error paths.
        shutil.rmtree(temp_parent_dir, ignore_errors=True)
# NOTE(review): orphaned fragment — this reads like the body of tensorflowjs'
# dispatch_keras_h5_to_tensorflowjs_graph_model_conversion with its `def`
# header and all indentation lost in the paste; `h5_path`, `output_dir`,
# `quantization_dtype`, `skip_op_check` and `strip_debug_ops` would be its
# parameters. Not valid standalone Python as written — confirm against the
# original tensorflowjs converter source before reusing.
# Validate that h5_path names an existing HDF5 file, not a directory.
if not os.path.exists(h5_path):
raise ValueError('Nonexistent path to HDF5 file: %s' % h5_path)
if os.path.isdir(h5_path):
raise ValueError(
'Expected path to point to an HDF5 file, but it points to a '
'directory: %s' % h5_path)
# NOTE(review): mktemp() only reserves a name (deprecated, race-prone);
# mkdtemp() would be the safe replacement.
temp_savedmodel_dir = tempfile.mktemp(suffix='.savedmodel')
# Round-trip: HDF5 -> intermediate SavedModel -> TF.js graph model.
model = keras.models.load_model(h5_path)
keras.experimental.export_saved_model(
model, temp_savedmodel_dir, serving_only=True)
# NOTE(cais): This cannot use `tf.compat.v1` because
# `convert_tf_saved_model()` works only in v2.
tf_saved_model_conversion_v2.convert_tf_saved_model(
temp_savedmodel_dir, output_dir,
signature_def='serving_default',
saved_model_tags='serve',
quantization_dtype=quantization_dtype,
skip_op_check=skip_op_check,
strip_debug_ops=strip_debug_ops)
# Clean up the temporary SavedModel directory.
shutil.rmtree(temp_savedmodel_dir)
# NOTE(review): byte-for-byte duplicate of the fragment immediately preceding
# this one (same headerless dispatch_keras_h5_to... body, same undefined
# names). One copy should be removed once the paste is reconciled with the
# original tensorflowjs source.
if not os.path.exists(h5_path):
raise ValueError('Nonexistent path to HDF5 file: %s' % h5_path)
if os.path.isdir(h5_path):
raise ValueError(
'Expected path to point to an HDF5 file, but it points to a '
'directory: %s' % h5_path)
# NOTE(review): mktemp() is deprecated/race-prone; mkdtemp() would be safer.
temp_savedmodel_dir = tempfile.mktemp(suffix='.savedmodel')
model = keras.models.load_model(h5_path)
keras.experimental.export_saved_model(
model, temp_savedmodel_dir, serving_only=True)
# NOTE(cais): This cannot use `tf.compat.v1` because
# `convert_tf_saved_model()` works only in v2.
tf_saved_model_conversion_v2.convert_tf_saved_model(
temp_savedmodel_dir, output_dir,
signature_def='serving_default',
saved_model_tags='serve',
quantization_dtype=quantization_dtype,
skip_op_check=skip_op_check,
strip_debug_ops=strip_debug_ops)
# Clean up the temporary SavedModel directory.
shutil.rmtree(temp_savedmodel_dir)
# NOTE(review): orphaned tail of an if/elif dispatch on the
# (input_format, output_format) pair — the opening `if`, the enclosing
# function (tensorflowjs CLI `convert`-style), and the definitions of
# `args`, `common`, `weight_shard_size_bytes` and the dispatch_* helpers are
# not in this chunk. Not valid standalone Python as written.
# tfjs-layers -> tfjs-layers: re-shard / re-quantize weights in place.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.TFJS_LAYERS_MODEL):
dispatch_tensorflowjs_to_tensorflowjs_conversion(
args.input_path, args.output_path,
quantization_dtype=_parse_quantization_bytes(args.quantization_bytes),
weight_shard_size_bytes=weight_shard_size_bytes)
# tfjs-layers -> tfjs-graph model conversion.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.TFJS_GRAPH_MODEL):
dispatch_tfjs_layers_model_to_tfjs_graph_conversion(
args.input_path, args.output_path,
quantization_dtype=_parse_quantization_bytes(args.quantization_bytes),
skip_op_check=args.skip_op_check,
strip_debug_ops=args.strip_debug_ops)
# frozen TF graph -> tfjs-graph model conversion.
elif (input_format == common.TF_FROZEN_MODEL and
output_format == common.TFJS_GRAPH_MODEL):
tf_saved_model_conversion_v2.convert_tf_frozen_model(
args.input_path, args.output_node_names, args.output_path,
quantization_dtype=_parse_quantization_bytes(args.quantization_bytes),
skip_op_check=args.skip_op_check,
strip_debug_ops=args.strip_debug_ops)
# Any pair not matched above is unsupported.
else:
raise ValueError(
'Unsupported input_format - output_format pair: %s - %s' %
(input_format, output_format))
# NOTE(review): another orphaned slice of the same if/elif dispatch — it
# starts mid-branch (the `elif` header for this first call is missing) and
# the final call below is cut off mid-argument-list (no closing paren).
# Not valid standalone Python as written.
dispatch_keras_saved_model_to_tensorflowjs_conversion(
args.input_path, args.output_path,
quantization_dtype=quantization_dtype,
split_weights_by_layer=args.split_weights_by_layer)
# TF SavedModel -> tfjs-graph model conversion.
elif (input_format == common.TF_SAVED_MODEL and
output_format == common.TFJS_GRAPH_MODEL):
tf_saved_model_conversion_v2.convert_tf_saved_model(
args.input_path, args.output_path,
signature_def=args.signature_name,
saved_model_tags=args.saved_model_tags,
quantization_dtype=quantization_dtype,
skip_op_check=args.skip_op_check,
strip_debug_ops=args.strip_debug_ops)
# TF-Hub module -> tfjs-graph model conversion.
elif (input_format == common.TF_HUB_MODEL and
output_format == common.TFJS_GRAPH_MODEL):
tf_saved_model_conversion_v2.convert_tf_hub_module(
args.input_path, args.output_path, args.signature_name,
args.saved_model_tags, skip_op_check=args.skip_op_check,
strip_debug_ops=args.strip_debug_ops)
# tfjs-layers -> Keras HDF5 conversion.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.KERAS_MODEL):
dispatch_tensorflowjs_to_keras_h5_conversion(args.input_path,
args.output_path)
# tfjs-layers -> Keras SavedModel conversion.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.KERAS_SAVED_MODEL):
dispatch_tensorflowjs_to_keras_saved_model_conversion(args.input_path,
args.output_path)
# tfjs-layers -> tfjs-layers re-shard/re-quantize.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.TFJS_LAYERS_MODEL):
# NOTE(review): call truncated here — argument list never closes.
dispatch_tensorflowjs_to_tensorflowjs_conversion(
args.input_path, args.output_path,
quantization_dtype=_parse_quantization_bytes(args.quantization_bytes),
# NOTE(review): near-duplicate of the fragment above — again starts
# mid-branch (missing `elif` header for the first call) and is again
# truncated mid-argument-list at the end. Not valid standalone Python;
# reconcile with the original tensorflowjs converter source and keep one
# complete copy.
dispatch_keras_saved_model_to_tensorflowjs_conversion(
args.input_path, args.output_path,
quantization_dtype=quantization_dtype,
split_weights_by_layer=args.split_weights_by_layer)
# TF SavedModel -> tfjs-graph model conversion.
elif (input_format == common.TF_SAVED_MODEL and
output_format == common.TFJS_GRAPH_MODEL):
tf_saved_model_conversion_v2.convert_tf_saved_model(
args.input_path, args.output_path,
signature_def=args.signature_name,
saved_model_tags=args.saved_model_tags,
quantization_dtype=quantization_dtype,
skip_op_check=args.skip_op_check,
strip_debug_ops=args.strip_debug_ops)
# TF-Hub module -> tfjs-graph model conversion.
elif (input_format == common.TF_HUB_MODEL and
output_format == common.TFJS_GRAPH_MODEL):
tf_saved_model_conversion_v2.convert_tf_hub_module(
args.input_path, args.output_path, args.signature_name,
args.saved_model_tags, skip_op_check=args.skip_op_check,
strip_debug_ops=args.strip_debug_ops)
# tfjs-layers -> Keras HDF5 conversion.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.KERAS_MODEL):
dispatch_tensorflowjs_to_keras_h5_conversion(args.input_path,
args.output_path)
# tfjs-layers -> Keras SavedModel conversion.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.KERAS_SAVED_MODEL):
dispatch_tensorflowjs_to_keras_saved_model_conversion(args.input_path,
args.output_path)
# tfjs-layers -> tfjs-layers re-shard/re-quantize.
elif (input_format == common.TFJS_LAYERS_MODEL and
output_format == common.TFJS_LAYERS_MODEL):
# NOTE(review): chunk ends here mid-call — argument list never closes.
dispatch_tensorflowjs_to_tensorflowjs_conversion(
args.input_path, args.output_path,
quantization_dtype=_parse_quantization_bytes(args.quantization_bytes),