How to use the mediapipe.util.sequence.media_sequence.set_clip_data_path function in mediapipe

To help you get started, we’ve selected a few mediapipe examples based on popular ways this function is used in public projects.
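Before the project snippets below, here is a minimal sketch of the call in isolation. It assumes TensorFlow is installed alongside mediapipe: set_clip_data_path stores the video's location as a bytes feature on a tf.train.SequenceExample, and the module generates a matching get_clip_data_path getter to read it back. The bytes23 helper seen throughout the snippets is a small utility defined in these example scripts that produces a bytes string under both Python 2 and 3.

import tensorflow as tf
from mediapipe.util.sequence import media_sequence as ms

# Metadata for one clip lives in a SequenceExample; the clip data path
# tells downstream MediaPipe graphs which video file to open.
metadata = tf.train.SequenceExample()
ms.set_clip_data_path(b"/path/to/video.mp4", metadata)  # placeholder path
print(ms.get_clip_data_path(metadata))  # b'/path/to/video.mp4'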


github google/mediapipe/mediapipe/examples/desktop/media_sequence/charades_dataset.py
        filepath = os.path.join(video_dir, "%s.mp4" % row["id"])
        actions = row["actions"].split(";")
        action_indices = []
        action_strings = []
        action_start_times = []
        action_end_times = []
        for action in actions:
          if not action:
            continue
          string, start, end = action.split(" ")
          action_indices.append(int(string[1:]))
          action_strings.append(bytes23(string))
          action_start_times.append(int(float(start) * SECONDS_TO_MICROSECONDS))
          action_end_times.append(int(float(end) * SECONDS_TO_MICROSECONDS))
        ms.set_example_id(bytes23(row["id"]), metadata)
        ms.set_clip_data_path(bytes23(filepath), metadata)
        ms.set_clip_start_timestamp(0, metadata)
        ms.set_clip_end_timestamp(
            int(float(row["length"]) * SECONDS_TO_MICROSECONDS), metadata)
        ms.set_segment_start_timestamp(action_start_times, metadata)
        ms.set_segment_end_timestamp(action_end_times, metadata)
        ms.set_segment_label_string(action_strings, metadata)
        ms.set_segment_label_index(action_indices, metadata)
        yield metadata
github google/mediapipe/mediapipe/examples/desktop/media_sequence/kinetics_dataset.py
          continue
        # rename the row with a consistent set of names.
        if len(csv_row) == 5:
          row = dict(zip(["label_name", "video", "start", "end", "split"],
                         csv_row))
        else:
          row = dict(zip(["video", "start", "end", "split"],
                         csv_row))
        metadata = tf.train.SequenceExample()
        ms.set_example_id(bytes23(row["video"] + "_" + row["start"]),
                          metadata)
        ms.set_clip_media_id(bytes23(row["video"]), metadata)
        ms.set_clip_alternative_media_id(bytes23(row["split"]), metadata)
        if video_path_format_string:
          filepath = video_path_format_string.format(**row)
          ms.set_clip_data_path(bytes23(filepath), metadata)
        assert row["start"].isdigit(), "Invalid row: %s" % str(row)
        assert row["end"].isdigit(), "Invalid row: %s" % str(row)
        if "label_name" in row:
          ms.set_clip_label_string([bytes23(row["label_name"])], metadata)
          if label_map:
            ms.set_clip_label_index([label_map[row["label_name"]]], metadata)
        yield metadata
github google/mediapipe/mediapipe/examples/desktop/media_sequence/demo_dataset.py
    urlretrieve = urllib.urlretrieve
    for split in SPLITS:
      reader = csv.DictReader(SPLITS[split].split("\n"))
      all_metadata = []
      for row in reader:
        url = row["url"]
        basename = url.split("/")[-1]
        local_path = os.path.join(self.path_to_data, basename)
        if not tf.io.gfile.exists(local_path):
          urlretrieve(url, local_path)

        for start_time in range(0, int(row["duration"]), SECONDS_PER_EXAMPLE):
          metadata = tf.train.SequenceExample()
          ms.set_example_id(bytes23(basename + "_" + str(start_time)),
                            metadata)
          ms.set_clip_data_path(bytes23(local_path), metadata)
          ms.set_clip_start_timestamp(start_time * MICROSECONDS_PER_SECOND,
                                      metadata)
          ms.set_clip_end_timestamp(
              (start_time + SECONDS_PER_EXAMPLE) * MICROSECONDS_PER_SECOND,
              metadata)
          ms.set_clip_label_index((int(row["label index"]),), metadata)
          ms.set_clip_label_string((bytes23(row["label string"]),),
                                   metadata)
          all_metadata.append(metadata)
      random.seed(47)
      random.shuffle(all_metadata)
      shard_names = [self._indexed_shard(split, i) for i in range(NUM_SHARDS)]
      writers = [tf.io.TFRecordWriter(shard_name) for shard_name in shard_names]
      with _close_on_exit(writers) as writers:
        for i, seq_ex in enumerate(all_metadata):
          for graph in GRAPHS:
github google/mediapipe/mediapipe/examples/desktop/youtube8m/generate_input_sequence_example.py
def main(argv):
  if len(argv) > 3:
    raise app.UsageError('Too many command-line arguments.')

  if not flags.FLAGS.path_to_input_video:
    raise ValueError('You must specify the path to the input video.')
  if not flags.FLAGS.clip_end_time_sec:
    raise ValueError('You must specify the clip end timestamp in seconds.')
  if flags.FLAGS.clip_start_time_sec >= flags.FLAGS.clip_end_time_sec:
    raise ValueError(
        'The clip start time must be less than the clip end time.')
  metadata = tf.train.SequenceExample()
  ms.set_clip_data_path(bytes23(flags.FLAGS.path_to_input_video), metadata)
  ms.set_clip_start_timestamp(
      flags.FLAGS.clip_start_time_sec * SECONDS_TO_MICROSECONDS, metadata)
  ms.set_clip_end_timestamp(
      flags.FLAGS.clip_end_time_sec * SECONDS_TO_MICROSECONDS, metadata)
  with open('/tmp/mediapipe/metadata.pb', 'wb') as writer:
    writer.write(metadata.SerializeToString())
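The snippet above serializes the SequenceExample to /tmp/mediapipe/metadata.pb. As a follow-up, here is a minimal sketch of reading that file back and recovering the clip fields, assuming the same TensorFlow and mediapipe imports as the snippet (the get_* getters are generated by the same media_sequence module that provides the setters):

import tensorflow as tf
from mediapipe.util.sequence import media_sequence as ms

# Parse the serialized metadata written by generate_input_sequence_example
# and read back the clip fields it set.
metadata = tf.train.SequenceExample()
with open('/tmp/mediapipe/metadata.pb', 'rb') as f:
  metadata.ParseFromString(f.read())
print(ms.get_clip_data_path(metadata))        # path passed on the command line
print(ms.get_clip_start_timestamp(metadata))  # microseconds
print(ms.get_clip_end_timestamp(metadata))    # microseconds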

mediapipe

MediaPipe is the simplest way for researchers and developers to build world-class ML solutions and applications for mobile, edge, cloud and the web.

Apache-2.0