Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this snippet starts mid-scope (frame_rows, use_vertical,
# results_dir, opt, total_frames come from code above this view) and its
# indentation appears mangled by extraction.
frame_rows[k].append(frame_row)
# compile it to a video
images_dir = os.path.join(results_dir, 'frames_seed%4.4d' % opt.seed)
util.mkdir(images_dir)
# Stitch each frame's row of images side-by-side (axis=1) or stacked
# vertically (axis=0) depending on use_vertical, then save as JPEG.
for k in range(total_frames):
final_frame = np.concatenate(frame_rows[k], axis=1 - use_vertical)
util.save_image(final_frame, os.path.join(
images_dir, 'frame_%4.4d.jpg' % k))
video_file = os.path.join(
results_dir, 'morphing_video_seed%4.4d_fps%d.mp4' % (opt.seed, opt.fps))
# produce_frame is presumably a callable rendering the frame at time t;
# it is defined elsewhere -- verify against the full file.
video = moviepy.editor.VideoClip(
produce_frame, duration=float(total_frames) / opt.fps)
# NOTE(review): the filename embeds opt.fps but the file is written at a
# hard-coded fps=30 -- confirm whether this mismatch is intentional.
video.write_videofile(video_file, fps=30, codec='libx264', bitrate='16M')
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Args:
        images: sequence of numpy frames. When ``true_image`` is False the
            pixel values are assumed to lie in [-1, 1] and are rescaled to
            uint8 [0, 255]; when True they are cast to uint8 as-is.
        fname: output GIF path.
        duration: total clip length in seconds.
        true_image: whether frames are already display-ready pixel data.
    """
    # Imported lazily so modules that never call this don't need moviepy.
    import moviepy.editor as mpy

    def make_frame(t):
        # Map time t to a frame index; rounding at t == duration can
        # overshoot by one, so clamp to the last frame.  The original used
        # a bare ``except:`` here, which also hid real errors (TypeError,
        # KeyboardInterrupt, ...) -- catch only the expected IndexError.
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
# NOTE(review): indentation in this snippet appears mangled by extraction;
# RaceData, tqdm, mpy, GTStandings, race_data, framerate, sync, Image,
# ImageDraw and PIL_to_npimage are all defined elsewhere -- verify against
# the full file before restructuring.
time_data = RaceData(configuration['source_telemetry'])
# Drain the telemetry source to completion (StopIteration marks the end),
# showing a progress bar; only the elapsed time is needed afterwards.
with tqdm(desc="Processing telemetry") as progress:
while True:
try:
_ = time_data.get_data()
progress.update()
except StopIteration:
break
# Blank background clip sized to the output video and lasting the whole race.
source_video = mpy.ColorClip((1280, 1024)).set_duration(
time_data.elapsed_time)
pcre_standings = GTStandings(
race_data,
ups=framerate,
**configuration)
# The mask clip supplies per-pixel transparency for the standings overlay.
standings_clip_mask = mpy.VideoClip(
make_frame=pcre_standings.make_mask_frame,
ismask=True)
standings_clip = mpy.VideoClip(
make_frame=pcre_standings.make_frame
).set_mask(standings_clip_mask)
if sync:
def timecode_frame(time):
"""
Custom make frame for timecode: renders the current clip time as
text on a small 100x40 RGB image and returns it as a numpy frame.
"""
timecode_image = Image.new('RGB', (100, 40))
draw = ImageDraw.Draw(timecode_image)
draw.text((10, 10), "%.02f"%(time))
return PIL_to_npimage(timecode_image)
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Args:
        images: sequence of numpy frames. When ``true_image`` is False the
            pixel values are assumed to lie in [-1, 1] and are rescaled to
            uint8 [0, 255]; when True they are cast to uint8 as-is.
        fname: output GIF path.
        duration: total clip length in seconds.
        true_image: whether frames are already display-ready pixel data.
    """
    # Imported lazily so modules that never call this don't need moviepy.
    import moviepy.editor as mpy

    def make_frame(t):
        # Map time t to a frame index; rounding at t == duration can
        # overshoot by one, so clamp to the last frame.  The original used
        # a bare ``except:`` here, which also hid real errors -- catch only
        # the expected IndexError.
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Args:
        images: sequence of numpy frames. When ``true_image`` is False the
            pixel values are assumed to lie in [-1, 1] and are rescaled to
            uint8 [0, 255]; when True they are cast to uint8 as-is.
        fname: output GIF path.
        duration: total clip length in seconds.
        true_image: whether frames are already display-ready pixel data.
    """
    # Imported lazily so modules that never call this don't need moviepy.
    import moviepy.editor as mpy

    def make_frame(t):
        # Map time t to a frame index; rounding at t == duration can
        # overshoot by one, so clamp to the last frame.  The original used
        # a bare ``except:`` here, which also hid real errors -- catch only
        # the expected IndexError.
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Args:
        images: sequence of numpy frames. When ``true_image`` is False the
            pixel values are assumed to lie in [-1, 1] and are rescaled to
            uint8 [0, 255]; when True they are cast to uint8 as-is.
        fname: output GIF path.
        duration: total clip length in seconds.
        true_image: whether frames are already display-ready pixel data.
    """
    # Imported lazily so modules that never call this don't need moviepy.
    import moviepy.editor as mpy

    def make_frame(t):
        # Map time t to a frame index; rounding at t == duration can
        # overshoot by one, so clamp to the last frame.  The original used
        # a bare ``except:`` here, which also hid real errors -- catch only
        # the expected IndexError.
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
# Save projection as CSV so the embedding can be inspected later.
df = pd.DataFrame(index=labels, data=tsne_proj, columns=['x', 'y'])
df.to_csv('data/tsne.csv', index_label='label')
# 2d visualization of the final embedding.
scatter(tsne_proj)
plt.savefig('images/tsne.png', dpi=120)
# Monkey patch in order to track model evolution: the patched
# _gradient_descent (defined elsewhere) records intermediate embeddings.
sklearn.manifold.t_sne._gradient_descent = _gradient_descent
# Positions of the map points at every iteration, filled in by the patch.
positions = []
X_proj = TSNE(random_state=RS).fit_transform(tsne_proj)
# np.dstack requires a sequence of arrays -- passing a generator was
# deprecated in NumPy and now raises TypeError, so materialize a list.
X_iter = np.dstack([position.reshape(-1, 2) for position in positions])
f, ax, sc = scatter(X_iter[..., -1])
# Animate the embedding's evolution; make_frame_mpl is defined elsewhere.
animation = mpy.VideoClip(make_frame_mpl, duration=X_iter.shape[2] / 40.)
animation.write_gif("images/tsne.gif", fps=20)
def make_video(images, fps):
    """Build a moviepy VideoClip that plays *images* back at *fps*.

    Args:
        images: sequence of uint8-convertible numpy frames.
        fps: playback frame rate; clip duration is len(images) / fps.

    Returns:
        A ``moviepy.editor.VideoClip`` with its ``fps`` attribute set.
    """
    # Imported lazily so modules that never call this don't need moviepy.
    import moviepy.editor as mpy

    duration = len(images) / fps

    def make_frame(t):
        # len(images) / duration == fps, so this maps t to a frame index;
        # rounding at t == duration can overshoot, so clamp to the last
        # frame.  The original bare ``except:`` also masked real errors --
        # catch only the expected IndexError.
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]
        return x.astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.fps = fps
    return clip
from moviepy.video.io.bindings import mplfig_to_npimage
from moviepy.editor import VideoClip

# Prepare the figure once up front; every animation frame then resamples
# the model and redraws into this same figure.
fig = model.make_figure()
plt.set_cmap('terrain')
plot_slice = slice(0, 300)
model.plot(fig=fig, draw=False, plot_slice=plot_slice)


def make_frame_mpl(t):
    """Advance the sampler by one step and render the figure as a frame."""
    model.resample_model()
    model.plot(fig=fig, update=True, draw=False, plot_slice=plot_slice)
    plt.tight_layout()
    return mplfig_to_npimage(fig)


# Ten seconds at 30 fps => 300 Gibbs sweeps rendered to video.
animation = VideoClip(make_frame_mpl, duration=10)
animation.write_videofile('gibbs.mp4', fps=30)
# NOTE(review): this snippet is the tail of a method (self, args, start,
# get_model_by_type and region_of_interest come from outside this view);
# indentation appears mangled by extraction.
self.end = len(self.index) - 1
num_frames = self.end - start
self.iRec = start
self.scale = args.scale
self.keras_part = None
self.do_salient = False
# Optionally load a Keras pilot model; salient-map rendering is only
# enabled if its initialization succeeds.
if args.model is not None:
self.keras_part = get_model_by_type(args.type, cfg=self.cfg)
self.keras_part.load(args.model)
self.keras_part.compile()
if args.salient:
self.do_salient = self.init_salient(self.keras_part.model)
# Region-of-interest mask sized to the configured target image shape.
self.roi_mask = region_of_interest((self.cfg.TARGET_H, self.cfg.TARGET_W, self.cfg.TARGET_D), self.cfg.ROI_REGION)
print('making movie', args.out, 'from', num_frames, 'images')
# Duration assumes the recordings were captured at DRIVE_LOOP_HZ, so
# playback at the same rate reproduces real time.
clip = mpy.VideoClip(self.make_frame,
duration=((num_frames - 1) / self.cfg.DRIVE_LOOP_HZ))
clip.write_videofile(args.out, fps=self.cfg.DRIVE_LOOP_HZ)