diff --git a/README.md b/README.md index 2db51c8ad..f25bf5f61 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,8 @@ [![MoviePy page on the Python Package Index](https://badge.fury.io/py/moviepy.svg)](https://pypi.org/project/moviepy/) [![Discuss MoviePy on Gitter](https://img.shields.io/gitter/room/movie-py/gitter?color=46BC99&logo=gitter)](Gitter_) [![Build status on gh-actions](https://img.shields.io/github/actions/workflow/status/Zulko/moviepy/test_suite.yml?logo=github)](https://github.com/Zulko/moviepy/actions/workflows/test_suite.yml) [![Code coverage from coveralls.io](https://img.shields.io/coveralls/github/Zulko/moviepy/master?logo=coveralls)](https://coveralls.io/github/Zulko/moviepy?branch=master) -> [!NOTE] MoviePy recently upgraded to v2.0, introducing major breaking changes. You can consult the last v1 docs [here](https://zulko.github.io/moviepy/v1.0.3/) but beware that v1 is no longer maintained. For more info on how to update your code from v1 to v2, see [this guide](https://zulko.github.io/moviepy/getting_started/updating_to_v2.html). +> [!NOTE] +> MoviePy recently upgraded to v2.0, introducing major breaking changes. You can consult the last v1 docs [here](https://zulko.github.io/moviepy/v1.0.3/) but beware that v1 is no longer maintained. For more info on how to update your code from v1 to v2, see [this guide](https://zulko.github.io/moviepy/getting_started/updating_to_v2.html). MoviePy (online documentation [here](https://zulko.github.io/moviepy/)) is a Python library for video editing: cuts, concatenations, title @@ -20,26 +21,28 @@ In this example we open a video file, select the subclip between 10 and result to a new file: ``` python -# Import everything needed to edit video clips -from moviepy import * +from moviepy import VideoFileClip, TextClip, CompositeVideoClip # Load file example.mp4 and keep only the subclip from 00:00:10 to 00:00:20 -clip = VideoFileClip("long_examples/example2.mp4").subclipped(10, 20) - # Reduce the audio volume to 80% of its original volume -clip = clip.with_volume_scaled(0.8) -# Generate a text clip. You can customize the font, color, etc. -txt_clip = TextClip(font="example.ttf", text="Big Buck Bunny", font_size=70, color='white') +clip = ( + VideoFileClip("long_examples/example2.mp4") + .subclipped(10, 20) + .with_volume_scaled(0.8) +) -#The text clip should appear for 10s at the center of the screen -txt_clip = txt_clip.with_duration(10).with_position('center') +# Generate a text clip. You can customize the font, color, etc. +txt_clip = TextClip( + font="Arial.ttf", + text="Hello there!", + font_size=70, + color='white' +).with_duration(10).with_position('center') # Overlay the text clip on the first video clip -video = CompositeVideoClip([clip, txt_clip]) - -# Write the result to a file (many options available!) 
-video.write_videofile("result.mp4") +final_video = CompositeVideoClip([clip, txt_clip]) +final_video.write_videofile("result.mp4") ``` # Installation diff --git a/docs/_static/code/user_guide/compositing/CompositeAudioClip.py b/docs/_static/code/user_guide/compositing/CompositeAudioClip.py index 121dc4b24..dd8217e49 100644 --- a/docs/_static/code/user_guide/compositing/CompositeAudioClip.py +++ b/docs/_static/code/user_guide/compositing/CompositeAudioClip.py @@ -1,18 +1,22 @@ -from moviepy import * +"""Let's first concatenate (one after the other) then composite +(on top of each other) three audio clips.""" + +from moviepy import AudioFileClip, CompositeAudioClip, concatenate_audioclips # We load all the clips we want to compose -aclip1 = AudioFileClip("example.wav") -aclip2 = AudioFileClip("example2.wav") -aclip3 = AudioFileClip("example3.wav") +clip1 = AudioFileClip("example.wav") +clip2 = AudioFileClip("example2.wav") +clip3 = AudioFileClip("example3.wav") # All clip will play one after the other -concat = concatenate_audioclips([aclip1, aclip2, aclip3]) +concat = concatenate_audioclips([clip1, clip2, clip3]) -# We will play aclip1, then ontop of it aclip2 after 5s, and the aclip3 on top of both after 9s +# We will play clip1, then on top of it clip2 starting at t=5s, +# and clip3 on top of both starting t=9s compo = CompositeAudioClip( [ - aclip1.with_volume_scaled(1.2), - aclip2.with_start(5), # start at t=5s - aclip3.with_start(9), + clip1.with_volume_scaled(1.2), + clip2.with_start(5), # start at t=5s + clip3.with_start(9), ] ) diff --git a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py index 5da6b5181..1524c526b 100644 --- a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py +++ b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py @@ -1,10 +1,14 @@ -from moviepy import * +"""Let's stack three video clips on top of each other with +CompositeVideoClip.""" + +from moviepy import VideoFileClip, CompositeVideoClip # We load all the clips we want to compose clip1 = VideoFileClip("example.mp4") clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) -clip3 = VideoFileClip("example3.mp4") +clip3 = VideoFileClip("example.mp4") -# We concatenate them and write theme stacked on top of each other, with clip3 over clip2 over clip1 +# We concatenate them and write theme stacked on top of each other, +# with clip3 over clip2 over clip1 final_clip = CompositeVideoClip([clip1, clip2, clip3]) final_clip.write_videofile("final_clip.mp4") diff --git a/docs/_static/code/user_guide/compositing/concatenate.py b/docs/_static/code/user_guide/compositing/concatenate.py index 5bdaf0252..cb9331907 100644 --- a/docs/_static/code/user_guide/compositing/concatenate.py +++ b/docs/_static/code/user_guide/compositing/concatenate.py @@ -1,3 +1,5 @@ +"""Let's concatenate (play one after the other) three video clips.""" + from moviepy import VideoFileClip, concatenate_videoclips # We load all the clips we want to concatenate diff --git a/docs/_static/code/user_guide/compositing/crossfadein.py b/docs/_static/code/user_guide/compositing/crossfadein.py index 3127e1832..0c36713f8 100644 --- a/docs/_static/code/user_guide/compositing/crossfadein.py +++ b/docs/_static/code/user_guide/compositing/crossfadein.py @@ -1,18 +1,15 @@ -from moviepy import * +"""In this example, we will concatenate two clips with a 1-second +crossfadein of the second clip.""" + +from moviepy import VideoFileClip, CompositeVideoClip, vfx # We 
load all the clips we want to compose clip1 = VideoFileClip("example.mp4") -clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) - -# Clip2 will be on top of clip1 for 1s -clip1 = clip1.with_end(2) -clip2 = clip2.with_start(1) - -# We will add a crossfadein on clip2 for 1s -# As the other effects, transitions are added to Clip methods at runtime -clip2 = clip2.with_effects([vfx.CrossFadeIn(1)]) - +clip2 = VideoFileClip("example2.mp4") -# We write the result -final_clip = CompositeVideoClip([clip1, clip2]) +clips = [ + clip1.with_end(2), + clip2.with_start(1).with_effects([vfx.CrossFadeIn(1)]), +] +final_clip = CompositeVideoClip(clips) final_clip.write_videofile("final_clip.mp4") diff --git a/docs/_static/code/user_guide/compositing/juxtaposing.py b/docs/_static/code/user_guide/compositing/juxtaposing.py index 3d9b6d9d9..3f032643e 100644 --- a/docs/_static/code/user_guide/compositing/juxtaposing.py +++ b/docs/_static/code/user_guide/compositing/juxtaposing.py @@ -1,3 +1,5 @@ +"""Let's juxtapose four video clips in a 2x2 grid.""" + from moviepy import VideoFileClip, clips_array, vfx # We will use the same clip and transform it in 3 ways @@ -8,9 +10,12 @@ # The form of the final clip will depend of the shape of the array # We want our clip to be our 4 videos, 2x2, so we make an array of 2x2 -final_clip = clips_array([[clip1, clip2], [clip3, clip4]]) -final_clip = final_clip.resized( - width=480 -) # We resize the resulting clip to have the dimensions we want +array = [ + [clip1, clip2], + [clip3, clip4], +] +final_clip = clips_array(array) +# let's resize the final clip to a width of 480px +final_clip = final_clip.resized(width=480) final_clip.write_videofile("final_clip.mp4") diff --git a/docs/_static/code/user_guide/compositing/with_position.py b/docs/_static/code/user_guide/compositing/with_position.py index b020db854..9fca3ec41 100644 --- a/docs/_static/code/user_guide/compositing/with_position.py +++ b/docs/_static/code/user_guide/compositing/with_position.py @@ -1,4 +1,6 @@ -from moviepy import * +"""Let's position some text and images on a video.""" + +from moviepy import TextClip, VideoFileClip, CompositeVideoClip, ImageClip # We load all the clips we want to compose background = VideoFileClip("example2.mp4").subclipped(0, 2) @@ -28,8 +30,9 @@ ) logo = ImageClip("./example2.png", duration=1).resized(height=50) -# We want our title to be at the center horizontaly and start at 25% of the video verticaly -# We can set as "center", "left", "right", "top" and "bottom", and % relative from the clip size +# We want our title to be at the center horizontally and start at 25% +# of the video vertically. 
We can set as "center", "left", "right", +# "top" and "bottom", and % relative from the clip size title = title.with_position(("center", 0.25), relative=True) # We want the author to be in the center, 30px under the title @@ -42,7 +45,8 @@ copyright = copyright.with_position(("center", background.h - copyright.h - 30)) # Finally, we want the logo to be in the center, but to drop as time pass -# We can do so by setting position as a function that take time as argument, a lot like frame_function +# We can do so by setting position as a function that take time as argument, +# a lot like frame_function top = (background.h - logo.h) / 2 logo = logo.with_position(lambda t: ("center", top + t * 30)) diff --git a/docs/_static/code/user_guide/compositing/with_start.py b/docs/_static/code/user_guide/compositing/with_start.py index ae06bb59d..9d6bf84a1 100644 --- a/docs/_static/code/user_guide/compositing/with_start.py +++ b/docs/_static/code/user_guide/compositing/with_start.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import VideoFileClip, CompositeVideoClip # We load all the clips we want to compose clip1 = VideoFileClip("example.mp4") @@ -12,9 +12,9 @@ clip2 = clip2.with_start(1.5) # We want to play clip3 at the end of clip2, and so for 3 seconds only -clip3 = clip3.with_start(clip2.end).with_duration( - 1 -) # Some times its more practical to modify the duration of a clip instead of his end +# Some times its more practical to modify the duration of a clip instead +# of his end +clip3 = clip3.with_start(clip2.end).with_duration(1) # We write the result final_clip = CompositeVideoClip([clip1, clip2, clip3]) diff --git a/docs/_static/code/user_guide/effects/custom_effect.py b/docs/_static/code/user_guide/effects/custom_effect.py index 51bf04b95..bfe75a00b 100644 --- a/docs/_static/code/user_guide/effects/custom_effect.py +++ b/docs/_static/code/user_guide/effects/custom_effect.py @@ -1,3 +1,6 @@ +"""Let's write a custom effect that will add a basic progress bar +at the bottom of our clip.""" + from moviepy import VideoClip from moviepy.decorators import requires_duration @@ -22,7 +25,8 @@ def filter(get_frame, t): progression = t / clip.duration bar_width = int(progression * clip.w) - # Showing a progress bar is just replacing bottom pixels on some part of our frame + # Showing a progress bar is just replacing bottom pixels + # on some part of our frame frame = get_frame(t) frame[-height:, 0:bar_width] = color diff --git a/docs/_static/code/user_guide/effects/image_transform.py b/docs/_static/code/user_guide/effects/image_transform.py index 2983b8125..1d2b6cde7 100644 --- a/docs/_static/code/user_guide/effects/image_transform.py +++ b/docs/_static/code/user_guide/effects/image_transform.py @@ -1,3 +1,5 @@ +"""Let's invert the green and blue channels of a video.""" + from moviepy import VideoFileClip import numpy diff --git a/docs/_static/code/user_guide/effects/modify_copy_example.py b/docs/_static/code/user_guide/effects/modify_copy_example.py index d965b0a03..6cb001c11 100644 --- a/docs/_static/code/user_guide/effects/modify_copy_example.py +++ b/docs/_static/code/user_guide/effects/modify_copy_example.py @@ -1,19 +1,21 @@ # Import everything needed to edit video clips -from moviepy import * +from moviepy import VideoFileClip # Load example.mp4 clip = VideoFileClip("example.mp4") -# This does nothing, as multiply_volume will return a copy of clip which you will loose immediatly as you dont store it +# This does nothing, as multiply_volume will return a copy of clip +# which you will 
lose immediately as you don't store it # If you was to render clip now, the audio would still be at full volume clip.with_volume_scaled(0.1) -# This create a copy of clip in clip_whisper with a volume of only 10% the original, but does not modify the original clip +# This creates a copy of clip in clip_whisper at only 10% of the original volume, +# but does not modify the original clip # If you was to render clip right now, the audio would still be at full volume # If you was to render clip_whisper, the audio would be a 10% of the original volume clip_whisper = clip.with_volume_scaled(0.1) -# This replace the original clip with a copy of it where volume is only 10% of the original -# If you was to render clip now, the audio would be at 10% +# This replaces the original clip with a copy of it where the volume is only 10% of +# the original. If you were to render clip now, the audio would be at 10% # The original clip is now lost clip = clip.with_volume_scaled(0.1) diff --git a/docs/_static/code/user_guide/effects/time_transform.py b/docs/_static/code/user_guide/effects/time_transform.py index c5ccad256..86e837111 100644 --- a/docs/_static/code/user_guide/effects/time_transform.py +++ b/docs/_static/code/user_guide/effects/time_transform.py @@ -3,13 +3,7 @@ my_clip = VideoFileClip("example.mp4") - -# You can define a function the classical way -def accel_x3(time: float) -> float: - return time * 3 - - -modified_clip1 = my_clip.time_transform(accel_x3) - -# Of you can also use lambda function +# Let's accelerate the video by a factor of 3 +modified_clip1 = my_clip.time_transform(lambda t: t * 3) +# Let's play the video back and forth with a "sine" time-warping effect modified_clip2 = my_clip.time_transform(lambda t: 1 + math.sin(t)) diff --git a/docs/_static/code/user_guide/effects/transform.py b/docs/_static/code/user_guide/effects/transform.py index 38678f865..2a9d0410c 100644 --- a/docs/_static/code/user_guide/effects/transform.py +++ b/docs/_static/code/user_guide/effects/transform.py @@ -1,5 +1,6 @@ +"""Let's create a scrolling video effect from scratch.""" + from moviepy import VideoFileClip -import math my_clip = VideoFileClip("example.mp4") diff --git a/docs/_static/code/user_guide/effects/using_effects.py b/docs/_static/code/user_guide/effects/using_effects.py index 8cf6cc933..06a52006f 100644 --- a/docs/_static/code/user_guide/effects/using_effects.py +++ b/docs/_static/code/user_guide/effects/using_effects.py @@ -2,14 +2,13 @@ from moviepy import vfx, afx myclip = VideoFileClip("example.mp4") -myclip = myclip.with_effects( - [vfx.Resize(width=460)] -) # resize clip to be 460px in width, keeping aspect ratio +# resize clip to be 460px in width, keeping aspect ratio +myclip = myclip.with_effects([vfx.Resize(width=460)]) # fx method return a copy of the clip, so we can easily chain them -myclip = myclip.with_effects( - [vfx.MultiplySpeed(2), afx.MultiplyVolume(0.5)] -) # double the speed and half the audio volume +# double the speed and halve the audio volume +myclip = myclip.with_effects([vfx.MultiplySpeed(2), afx.MultiplyVolume(0.5)]) -# because effects are added to Clip at runtime, you can also call them directly from your clip as methods +# because effects are added to Clip at runtime, you can also call +# them directly from your clip as methods myclip = myclip.with_effects([vfx.MultiplyColor(0.5)]) # darken the clip diff --git a/docs/_static/code/user_guide/effects/using_with_methods.py b/docs/_static/code/user_guide/effects/using_with_methods.py index 0267dc189..eae952147 100644 --- 
a/docs/_static/code/user_guide/effects/using_with_methods.py +++ b/docs/_static/code/user_guide/effects/using_with_methods.py @@ -1,5 +1,4 @@ from moviepy import VideoFileClip -from moviepy import vfx, afx myclip = VideoFileClip("example.mp4") myclip = myclip.with_end(5) # stop the clip after 5 sec diff --git a/docs/_static/code/user_guide/loading/AudioArrayClip.py b/docs/_static/code/user_guide/loading/AudioArrayClip.py index 6a02d41a0..fafc80269 100644 --- a/docs/_static/code/user_guide/loading/AudioArrayClip.py +++ b/docs/_static/code/user_guide/loading/AudioArrayClip.py @@ -1,5 +1,7 @@ +"""Let's create an audioclip from values in a numpy array.""" + import numpy as np -from moviepy import * +from moviepy import AudioArrayClip # We want to play those notes notes = {"A": 440, "B": 494, "C": 523, "D": 587, "E": 659, "F": 698} @@ -9,26 +11,25 @@ sample_rate = 44100 # Number of samples per second note_size = int(note_duration * sample_rate) -total_size = note_size * len(notes) +n_frames = note_size * len(notes) def frame_function(t, note_frequency): return np.sin(note_frequency * 2 * np.pi * t) -# We generate all frames timepoints -times = np.linspace(0, total_duration, total_size) +# At this point one could use this audioclip which generates the audio on the fly +# clip = AudioFileClip(frame_function) -# We make an array of size N*1, where N is the number of frames * total duration -audio_array = np.zeros((total_size, 2)) -i = 0 -for note, frequency in notes.items(): - for _ in range(note_size): - audio_array[i][0] = frame_function(times[i], frequency) - i += 1 +# We generate all frames timepoints +audio_frame_values = [ + 2 * [frame_function(t, freq)] + for freq in notes.values() + for t in np.arange(0, note_duration, 1.0 / sample_rate) +] # Create an AudioArrayClip from the audio samples -audio_clip = AudioArrayClip(audio_array, fps=sample_rate) +audio_clip = AudioArrayClip(np.array(audio_frame_values), fps=sample_rate) # Write the audio clip to a WAV file audio_clip.write_audiofile("result.wav", fps=44100) diff --git a/docs/_static/code/user_guide/loading/AudioClip.py b/docs/_static/code/user_guide/loading/AudioClip.py index fe1785c8e..34af669af 100644 --- a/docs/_static/code/user_guide/loading/AudioClip.py +++ b/docs/_static/code/user_guide/loading/AudioClip.py @@ -1,8 +1,10 @@ -from moviepy import * +from moviepy import AudioClip import numpy as np -# Producing a sinewave of 440 Hz -> note A -frame_function_audio = lambda t: np.sin(440 * 2 * np.pi * t) -# AUDIO CLIPS -clip = AudioClip(frame_function_audio, duration=3) +def audio_frame(t): + """Producing a sinewave of 440 Hz -> note A""" + return np.sin(440 * 2 * np.pi * t) + + +audio_clip = AudioClip(frame_function=audio_frame, duration=3) diff --git a/docs/_static/code/user_guide/loading/AudioFileClip.py b/docs/_static/code/user_guide/loading/AudioFileClip.py index e9aecd89f..07825956f 100644 --- a/docs/_static/code/user_guide/loading/AudioFileClip.py +++ b/docs/_static/code/user_guide/loading/AudioFileClip.py @@ -1,5 +1,4 @@ from moviepy import * -import numpy as np # Works for audio files, but also videos file where you only want the keep the audio track clip = AudioFileClip("example.wav") diff --git a/docs/_static/code/user_guide/loading/ColorClip.py b/docs/_static/code/user_guide/loading/ColorClip.py index 9bf5d9677..9c85d962b 100644 --- a/docs/_static/code/user_guide/loading/ColorClip.py +++ b/docs/_static/code/user_guide/loading/ColorClip.py @@ -1,8 +1,6 @@ -from moviepy import * +from moviepy import ColorClip -myclip = 
ColorClip( - size=(200, 100), color=(255, 0, 0), duration=1 -) # Color is passed as a RGB tuple -myclip.write_videofile( - "result.mp4", fps=1 -) # We really dont need more than 1 fps do we ? +# Color is passed as a RGB tuple +myclip = ColorClip(size=(200, 100), color=(255, 0, 0), duration=1) +# We really don't need more than 1 fps, do we? +myclip.write_videofile("result.mp4", fps=1) diff --git a/docs/_static/code/user_guide/loading/DataVideoClip.py b/docs/_static/code/user_guide/loading/DataVideoClip.py index 3d59498f1..096406207 100644 --- a/docs/_static/code/user_guide/loading/DataVideoClip.py +++ b/docs/_static/code/user_guide/loading/DataVideoClip.py @@ -1,4 +1,6 @@ -from moviepy import * +"""Let's make a clip where frames depend on values in a list""" + +from moviepy import DataVideoClip import numpy as np # Dataset will just be a list of colors as RGB @@ -12,13 +14,15 @@ ] -# The function make frame take data and create an image of 200x100 px fill with the color +# The frame function takes data and creates an image of 200x100 px +# filled with the color given in the dataset def frame_function(data): frame = np.full((100, 200, 3), data, dtype=np.uint8) return frame -# We create the DataVideoClip, and we set FPS at 2, making a 3s clip (because len(dataset) = 6, so 6/2=3) +# We create the DataVideoClip, and we set FPS at 2, making a 3s clip +# (because len(dataset) = 6, so 6/2=3) myclip = DataVideoClip(data=dataset, data_to_frame=frame_function, fps=2) # Modifying fps here will change video FPS, not clip FPS diff --git a/docs/_static/code/user_guide/loading/ImageClip.py b/docs/_static/code/user_guide/loading/ImageClip.py index f704a5500..1c5733026 100644 --- a/docs/_static/code/user_guide/loading/ImageClip.py +++ b/docs/_static/code/user_guide/loading/ImageClip.py @@ -1,4 +1,7 @@ -from moviepy import * +"""Here's how you create an ImageClip from an image file, from +arbitrary data, or by extracting a frame of a VideoClip at a given time""" + +from moviepy import ImageClip, VideoFileClip import numpy as np # Random RGB noise image of 200x100 @@ -6,6 +9,5 @@ myclip1 = ImageClip("example.png") # You can create it from a path myclip2 = ImageClip(noise_image) # from a (height x width x 3) RGB numpy array -myclip3 = VideoFileClip("./example.mp4").to_ImageClip( - t="00:00:01" -) # Or load videoclip and extract frame at a given time +# Or load videoclip and extract frame at a given time +myclip3 = VideoFileClip("./example.mp4").to_ImageClip(t="00:00:01") diff --git a/docs/_static/code/user_guide/loading/ImageSequenceClip.py b/docs/_static/code/user_guide/loading/ImageSequenceClip.py index a19432987..020324002 100644 --- a/docs/_static/code/user_guide/loading/ImageSequenceClip.py +++ b/docs/_static/code/user_guide/loading/ImageSequenceClip.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import ImageSequenceClip # A clip with a list of images showed for 1 second each myclip = ImageSequenceClip( @@ -9,19 +9,19 @@ ], durations=[1, 1, 1], ) -print( - "Clip duration: {}".format(myclip.duration) -) # 3 images, 1 seconds each, duration = 3 -print("Clip fps: {}".format(myclip.fps)) # 3 seconds, 3 images, fps is 3/3 = 1 +# 3 images, 1 second each, duration = 3 +print("Clip duration: {}".format(myclip.duration)) +# 3 seconds, 3 images, fps is 3/3 = 1 +print("Clip fps: {}".format(myclip.fps)) -# This time we will load all images in the dir, and instead of showing theme for X seconds, we will define FPS +# This time we will load all images in the dir, and instead of showing them +# for X 
seconds, we will define FPS myclip2 = ImageSequenceClip("./example_img_dir", fps=30) -print( - "Clip duration: {}".format(myclip2.duration) -) # fps = 30, so duration = nb images in dir / 30 +# fps = 30, so duration = nb images in dir / 30 +print("Clip duration: {}".format(myclip2.duration)) print("Clip fps: {}".format(myclip2.fps)) # fps = 30 myclip.write_gif("result.gif") # the gif will be 3 sec and 1 fps -myclip2.write_gif( - "result2.gif" -) # the gif will be 30 fps, duration will vary based on number of images in dir +# the gif will be 30 fps, its duration will depend on the number of +# images in dir +myclip2.write_gif("result2.gif") diff --git a/docs/_static/code/user_guide/loading/TextClip.py b/docs/_static/code/user_guide/loading/TextClip.py index c3dd23105..490775487 100644 --- a/docs/_static/code/user_guide/loading/TextClip.py +++ b/docs/_static/code/user_guide/loading/TextClip.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import TextClip font = "./example.ttf" @@ -8,11 +8,10 @@ font=font, text="Hello World !", font_size=30, - color="#FF0000", + color="#FF0000", # Red bg_color="#FFFFFF", duration=2, -) # Red - +) # This time we load text from a file, we set a fixed size for clip and let the system find best font size, # allowing for line breaking txt_clip2 = TextClip( @@ -26,8 +25,6 @@ # we set duration, because by default image clip are infinite, and we cannot render infinite txt_clip2 = txt_clip2.with_duration(2) - -txt_clip1.write_videofile( - "result1.mp4", fps=24 -) # ImageClip have no FPS either, so we must defined it +# ImageClip has no FPS either, so we must define it +txt_clip1.write_videofile("result1.mp4", fps=24) txt_clip2.write_videofile("result2.mp4", fps=24) diff --git a/docs/_static/code/user_guide/loading/UpdatedVideoClip.py b/docs/_static/code/user_guide/loading/UpdatedVideoClip.py index c45f7192a..c9a7a4920 100644 --- a/docs/_static/code/user_guide/loading/UpdatedVideoClip.py +++ b/docs/_static/code/user_guide/loading/UpdatedVideoClip.py @@ -1,17 +1,20 @@ -from moviepy import * +from moviepy import UpdatedVideoClip import numpy as np import random -# Imagine we want to make a video that become more and more red as we repeat same face on coinflip in a row -# because coinflip are done in real time, we need to wait until a winning row is done to be able -# to make the next frame. -# This is a world simulating that. Sorry, it's hard to come up with examples... class CoinFlipWorld: + """A simulation of coin flipping. + + Imagine we want to make a video that becomes more and more red as we repeat the same + face on coin flips in a row. Because coin flips are done in real time, we need to wait + until a winning streak is over to be able to make the next frame. + This is a world simulating that. 
Sorry, it's hard to come up with examples...""" + def __init__(self, fps): """ - FPS is usefull because we must increment clip_t by 1/FPS to have UpdatedVideoClip run with a certain FPS - + FPS is useful because we must increment clip_t by 1/FPS to have + UpdatedVideoClip run with a certain FPS """ self.clip_t = 0 self.win_strike = 0 @@ -41,9 +44,9 @@ def update(self): self.clip_t += 1 / self.fps def to_frame(self): - red_intensity = 255 * ( - self.win_strike / 10 - ) # 100% red for 10 victories and more + """Return a frame of a 200x100 image with red more or less intense based + on number of victories in a row.""" + red_intensity = 255 * (self.win_strike / 10) red_intensity = min(red_intensity, 255) # A 200x100 image with red more or less intense based on number of victories in a row @@ -53,6 +56,7 @@ def to_frame(self): world = CoinFlipWorld(fps=5) myclip = UpdatedVideoClip(world=world, duration=10) -# We will set FPS to same as world, if we was to use a different FPS, the lowest from world.fps and our write_videofile fps param +# We will set FPS to the same as the world's; if we were to use a different FPS, +# the lowest of world.fps and our write_videofile fps param # will be the real visible fps myclip.write_videofile("result.mp4", fps=5) diff --git a/docs/_static/code/user_guide/loading/VideoClip.py b/docs/_static/code/user_guide/loading/VideoClip.py index 2385eee3b..5952af359 100644 --- a/docs/_static/code/user_guide/loading/VideoClip.py +++ b/docs/_static/code/user_guide/loading/VideoClip.py @@ -1,6 +1,6 @@ from PIL import Image, ImageDraw import numpy as np -from moviepy import * +from moviepy import VideoClip import math WIDTH, HEIGHT = (128, 128) @@ -24,9 +24,7 @@ def frame_function(t): return np.array(img) # returns a 8-bit RGB array -clip = VideoClip( - frame_function, duration=2 -) # we define a 2s duration for the clip to be able to render it later -clip.write_gif( - "circle.gif", fps=15 -) # we must set a framerate because VideoClip have no framerate by default +# we define a 2s duration for the clip to be able to render it later +clip = VideoClip(frame_function, duration=2) +# we must set a framerate because VideoClip has no framerate by default +clip.write_gif("circle.gif", fps=15) diff --git a/docs/_static/code/user_guide/loading/VideoFileClip.py b/docs/_static/code/user_guide/loading/VideoFileClip.py index 2d24a8786..297af7175 100644 --- a/docs/_static/code/user_guide/loading/VideoFileClip.py +++ b/docs/_static/code/user_guide/loading/VideoFileClip.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import VideoFileClip myclip = VideoFileClip("example.mp4") @@ -9,7 +9,5 @@ myclip = myclip.subclipped(0.5, 2) # Cutting the clip between 0.5 and 2 secs. print("Clip duration: {}".format(myclip.duration)) # Cuting will update duration print("Clip fps: {}".format(myclip.fps)) # and keep fps - -myclip.write_videofile( - "result.mp4" -) # the output video will be 1.5 sec long and use original fps +# the output video will be 1.5 sec long and use the original fps +myclip.write_videofile("result.mp4") diff --git a/docs/_static/code/user_guide/loading/closing.py b/docs/_static/code/user_guide/loading/closing.py index c8d818ff1..d0e38d08f 100644 --- a/docs/_static/code/user_guide/loading/closing.py +++ b/docs/_static/code/user_guide/loading/closing.py @@ -1,8 +1,9 @@ from moviepy import * +# clip.close() is implicitly called, so the lock on the example.wav file +# is immediately released. 
try: with AudioFileClip("example.wav") as clip: raise Exception("Let's simulate an exception") except Exception as e: print("{}".format(e)) -# clip.close() is implicitly called, so the lock on my_audiofile.mp3 file is immediately released. diff --git a/docs/_static/code/user_guide/loading/loading.py b/docs/_static/code/user_guide/loading/loading.py index 0166cb944..3ebc677ed 100644 --- a/docs/_static/code/user_guide/loading/loading.py +++ b/docs/_static/code/user_guide/loading/loading.py @@ -1,35 +1,47 @@ -from moviepy import * +from moviepy import ( + VideoClip, + VideoFileClip, + ImageSequenceClip, + ImageClip, + TextClip, + ColorClip, + AudioFileClip, + AudioClip, +) import numpy as np # Define some constants for later use black = (255, 255, 255) # RGB for black -# Random noise image of 200x100 -frame_function = lambda t: np.random.randint(low=0, high=255, size=(100, 200, 3)) -# A note by producing a sinewave of 440 Hz -frame_function_audio = lambda t: np.sin(440 * 2 * np.pi * t) + + +def frame_function(t): + """Random noise image of 200x100""" + return np.random.randint(low=0, high=255, size=(100, 200, 3)) + + +def frame_function_audio(t): + """A note by producing a sinewave of 440 Hz""" + return np.sin(440 * 2 * np.pi * t) + # Now lets see how to load different type of resources ! -# VIDEO CLIPS` -clip = VideoClip( - frame_function, duration=5 -) # for custom animations, where frame_function is a function returning an image as numpy array for a given time +# VIDEO CLIPS +# for custom animations, where frame_function is a function returning an image +# as numpy array for a given time +clip = VideoClip(frame_function, duration=5) clip = VideoFileClip("example.mp4") # for videos -clip = ImageSequenceClip( - "example_img_dir", fps=24 -) # for a list or directory of images to be used as a video sequence +# for a list or directory of images to be used as a video sequence +clip = ImageSequenceClip("example_img_dir", fps=24) clip = ImageClip("example.png") # For a picture -clip = TextClip( - font="./example.ttf", text="Hello!", font_size=70, color="black" -) # To create the image of a text -clip = ColorClip( - size=(460, 380), color=black -) # a clip of a single unified color, where color is a RGB tuple/array/list +# To create the image of a text +clip = TextClip(font="./example.ttf", text="Hello!", font_size=70, color="black") +# a clip of a single unified color, where color is a RGB tuple/array/list +clip = ColorClip(size=(460, 380), color=black) # AUDIO CLIPS -clip = AudioFileClip( - "example.wav" -) # for audio files, but also videos where you only want the keep the audio track -clip = AudioClip( - frame_function_audio, duration=3 -) # for custom audio, where frame_function is a function returning a float (or tuple for stereo) for a given time +# for audio files, but also videos where you only want the keep the audio track +clip = AudioFileClip("example.wav") +# for custom audio, where frame_function is a function returning a +# float (or tuple for stereo) for a given time +clip = AudioClip(frame_function_audio, duration=3) diff --git a/docs/_static/code/user_guide/loading/masks.py b/docs/_static/code/user_guide/loading/masks.py index 233313a20..a58f48de5 100644 --- a/docs/_static/code/user_guide/loading/masks.py +++ b/docs/_static/code/user_guide/loading/masks.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import VideoClip, ImageClip, VideoFileClip import numpy as np # Random RGB noise image of 200x100 diff --git a/docs/getting_started/install.rst 
b/docs/getting_started/install.rst index d4b03d31a..08c83ccd0 100644 --- a/docs/getting_started/install.rst +++ b/docs/getting_started/install.rst @@ -80,8 +80,8 @@ To test if FFmpeg and FFplay are found by MoviePy, in a Python console, you can .. code-block:: python - >>> from moviepy.config import check - >>> check() + from moviepy.config import check + check() .. _ffmpeg: https://www.ffmpeg.org/download.html diff --git a/moviepy/Clip.py b/moviepy/Clip.py index 4bdd95839..871ed8e3e 100644 --- a/moviepy/Clip.py +++ b/moviepy/Clip.py @@ -170,11 +170,13 @@ def time_transform(self, time_func, apply_to=None, keep_duration=False): Examples -------- - >>> # plays the clip (and its mask and sound) twice faster - >>> new_clip = clip.time_transform(lambda t: 2*t, apply_to=['mask', 'audio']) - >>> - >>> # plays the clip starting at t=3, and backwards: - >>> new_clip = clip.time_transform(lambda t: 3-t) + .. code:: python + + # plays the clip (and its mask and sound) twice faster + new_clip = clip.time_transform(lambda t: 2*t, apply_to=['mask', 'audio']) + + # plays the clip starting at t=3, and backwards: + new_clip = clip.time_transform(lambda t: 3-t) """ if apply_to is None: @@ -507,17 +509,20 @@ def iter_frames(self, fps=None, with_times=False, logger=None, dtype=None): dtype : type, optional Type to cast Numpy array frames. Use ``dtype="uint8"`` when using the - pictures to write video, images... + pictures to write video, images.. Examples -------- - >>> # prints the maximum of red that is contained - >>> # on the first line of each frame of the clip. - >>> from moviepy import VideoFileClip - >>> myclip = VideoFileClip('myvideo.mp4') - >>> print ( [frame[0,:,0].max() - for frame in myclip.iter_frames()]) + + .. code:: python + + # prints the maximum of red that is contained + # on the first line of each frame of the clip. + from moviepy import VideoFileClip + myclip = VideoFileClip('myvideo.mp4') + print([frame[0,:,0].max() + for frame in myclip.iter_frames()]) """ logger = proglog.default_bar_logger(logger) for frame_index in logger.iter_bar( diff --git a/moviepy/audio/AudioClip.py b/moviepy/audio/AudioClip.py index 6c8ef7b94..edcaf4888 100644 --- a/moviepy/audio/AudioClip.py +++ b/moviepy/audio/AudioClip.py @@ -47,19 +47,21 @@ class AudioClip(Clip): Examples -------- - >>> # Plays the note A in mono (a sine wave of frequency 440 Hz) - >>> import numpy as np - >>> frame_function = lambda t: np.sin(440 * 2 * np.pi * t) - >>> clip = AudioClip(frame_function, duration=5, fps=44100) - >>> clip.preview() - - >>> # Plays the note A in stereo (two sine waves of frequencies 440 and 880 Hz) - >>> frame_function = lambda t: np.array([ - ... np.sin(440 * 2 * np.pi * t), - ... np.sin(880 * 2 * np.pi * t) - ... ]).T.copy(order="C") - >>> clip = AudioClip(frame_function, duration=3, fps=44100) - >>> clip.preview() + .. 
code:: python + + # Plays the note A in mono (a sine wave of frequency 440 Hz) + import numpy as np + frame_function = lambda t: np.sin(440 * 2 * np.pi * t) + clip = AudioClip(frame_function, duration=5, fps=44100) + clip.preview() + + # Plays the note A in stereo (two sine waves of frequencies 440 and 880 Hz) + frame_function = lambda t: np.array([ + np.sin(440 * 2 * np.pi * t), + np.sin(880 * 2 * np.pi * t) + ]).T.copy(order="C") + clip = AudioClip(frame_function, duration=3, fps=44100) + clip.preview() """ diff --git a/moviepy/audio/fx/AudioDelay.py b/moviepy/audio/fx/AudioDelay.py index 2deca2268..d1611d07b 100644 --- a/moviepy/audio/fx/AudioDelay.py +++ b/moviepy/audio/fx/AudioDelay.py @@ -34,17 +34,19 @@ class AudioDelay(Effect): Examples -------- - >>> from moviepy import * - >>> videoclip = AudioFileClip('myaudio.wav').with_effects([ - ... afx.AudioDelay(offset=.2, n_repeats=10, decayment=.2) - ... ]) + .. code:: python - >>> # stereo A note - >>> frame_function = lambda t: np.array( - ... [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)] - ... ).T - ... clip = AudioClip(frame_function=frame_function, duration=0.1, fps=44100) - ... clip = clip.with_effects([afx.AudioDelay(offset=.2, n_repeats=11, decay=0)]) + from moviepy import * + videoclip = AudioFileClip('myaudio.wav').with_effects([ + afx.AudioDelay(offset=.2, n_repeats=10, decayment=.2) + ]) + + # stereo A note + frame_function = lambda t: np.array( + [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)] + ).T + clip = AudioClip(frame_function=frame_function, duration=0.1, fps=44100) + clip = clip.with_effects([afx.AudioDelay(offset=.2, n_repeats=11, decay=0)]) """ offset: float = 0.2 diff --git a/moviepy/audio/fx/AudioFadeIn.py b/moviepy/audio/fx/AudioFadeIn.py index efae20900..bcba677e6 100644 --- a/moviepy/audio/fx/AudioFadeIn.py +++ b/moviepy/audio/fx/AudioFadeIn.py @@ -22,8 +22,10 @@ class AudioFadeIn(Effect): Examples -------- - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.with_effects([vfx.AudioFadeIn("00:00:06")]) + .. code:: python + + clip = VideoFileClip("media/chaplin.mp4") + clip.with_effects([afx.AudioFadeIn("00:00:06")]) """ duration: float diff --git a/moviepy/audio/fx/AudioFadeOut.py b/moviepy/audio/fx/AudioFadeOut.py index b2d9e1560..555d9ccc6 100644 --- a/moviepy/audio/fx/AudioFadeOut.py +++ b/moviepy/audio/fx/AudioFadeOut.py @@ -23,8 +23,10 @@ class AudioFadeOut(Effect): Examples -------- - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.with_effects([afx.AudioFadeOut("00:00:06")]) + .. code:: python + + clip = VideoFileClip("media/chaplin.mp4") + clip.with_effects([afx.AudioFadeOut("00:00:06")]) """ duration: float diff --git a/moviepy/audio/fx/AudioLoop.py b/moviepy/audio/fx/AudioLoop.py index ceea293c6..156db132c 100644 --- a/moviepy/audio/fx/AudioLoop.py +++ b/moviepy/audio/fx/AudioLoop.py @@ -16,11 +16,13 @@ class AudioLoop(Effect): Examples -------- - >>> from moviepy import * - >>> videoclip = VideoFileClip('myvideo.mp4') - >>> music = AudioFileClip('music.ogg') - >>> audio = music.with_effects([afx.AudioLoop(duration=videoclip.duration)]) - >>> videoclip.with_audio(audio) + .. 
code:: python + + from moviepy import * + videoclip = VideoFileClip('myvideo.mp4') + music = AudioFileClip('music.ogg') + audio = music.with_effects([afx.AudioLoop(duration=videoclip.duration)]) + videoclip.with_audio(audio) """ diff --git a/moviepy/audio/fx/MultiplyStereoVolume.py b/moviepy/audio/fx/MultiplyStereoVolume.py index 2bc4d9649..b800a360d 100644 --- a/moviepy/audio/fx/MultiplyStereoVolume.py +++ b/moviepy/audio/fx/MultiplyStereoVolume.py @@ -15,12 +15,14 @@ class MultiplyStereoVolume(Effect): Examples -------- - >>> from moviepy import AudioFileClip - >>> music = AudioFileClip('music.ogg') - >>> # mutes left channel - >>> audio_r = music.with_effects([afx.MultiplyStereoVolume(left=0, right=1)]) - >>> # halves audio volume - >>> audio_h = music.with_effects([afx.MultiplyStereoVolume(left=0.5, right=0.5)]) + .. code:: python + + from moviepy import AudioFileClip + music = AudioFileClip('music.ogg') + # mutes left channel + audio_r = music.with_effects([afx.MultiplyStereoVolume(left=0, right=1)]) + # halves audio volume + audio_h = music.with_effects([afx.MultiplyStereoVolume(left=0.5, right=0.5)]) """ left: float = 1 diff --git a/moviepy/audio/fx/MultiplyVolume.py b/moviepy/audio/fx/MultiplyVolume.py index 3f7c20f9a..9030ebbea 100644 --- a/moviepy/audio/fx/MultiplyVolume.py +++ b/moviepy/audio/fx/MultiplyVolume.py @@ -30,16 +30,18 @@ class MultiplyVolume(Effect): Examples -------- - >>> from moviepy import AudioFileClip - >>> - >>> music = AudioFileClip("music.ogg") - >>> # doubles audio volume - >>> doubled_audio_clip = music.with_effects([afx.MultiplyVolume(2)]) - >>> # halves audio volume - >>> half_audio_clip = music.with_effects([afx.MultiplyVolume(0.5)]) - >>> # silences clip during one second at third - >>> effect = afx.MultiplyVolume(0, start_time=2, end_time=3) - >>> silenced_clip = clip.with_effects([effect]) + .. code:: python + + from moviepy import AudioFileClip + + music = AudioFileClip("music.ogg") + # doubles audio volume + doubled_audio_clip = music.with_effects([afx.MultiplyVolume(2)]) + # halves audio volume + half_audio_clip = music.with_effects([afx.MultiplyVolume(0.5)]) + # silences clip during one second at third + effect = afx.MultiplyVolume(0, start_time=2, end_time=3) + silenced_clip = clip.with_effects([effect]) """ factor: float diff --git a/moviepy/audio/io/AudioFileClip.py b/moviepy/audio/io/AudioFileClip.py index f23bd5706..67c30d069 100644 --- a/moviepy/audio/io/AudioFileClip.py +++ b/moviepy/audio/io/AudioFileClip.py @@ -49,8 +49,10 @@ class AudioFileClip(AudioClip): Examples -------- - >>> snd = AudioFileClip("song.wav") - >>> snd.close() + .. code:: python + + snd = AudioFileClip("song.wav") + snd.close() """ @convert_path_to_string("filename") diff --git a/moviepy/tools.py b/moviepy/tools.py index c2bc34724..52917b40f 100644 --- a/moviepy/tools.py +++ b/moviepy/tools.py @@ -58,20 +58,22 @@ def convert_to_seconds(time): Here are the accepted formats: - >>> convert_to_seconds(15.4) # seconds - 15.4 - >>> convert_to_seconds((1, 21.5)) # (min,sec) - 81.5 - >>> convert_to_seconds((1, 1, 2)) # (hr, min, sec) - 3662 - >>> convert_to_seconds('01:01:33.045') - 3693.045 - >>> convert_to_seconds('01:01:33,5') # coma works too - 3693.5 - >>> convert_to_seconds('1:33,5') # only minutes and secs - 99.5 - >>> convert_to_seconds('33.5') # only secs - 33.5 + .. 
code:: python + + convert_to_seconds(15.4) # seconds + 15.4 + convert_to_seconds((1, 21.5)) # (min,sec) + 81.5 + convert_to_seconds((1, 1, 2)) # (hr, min, sec) + 3662 + convert_to_seconds('01:01:33.045') + 3693.045 + convert_to_seconds('01:01:33,5') # coma works too + 3693.5 + convert_to_seconds('1:33,5') # only minutes and secs + 99.5 + convert_to_seconds('33.5') # only secs + 33.5 """ factors = (1, 60, 3600) @@ -101,12 +103,13 @@ def deprecated_version_of(func, old_name): Examples -------- - >>> # The badly named method 'to_file' is replaced by 'write_file' - >>> class Clip: - >>> def write_file(self, some args): - >>> # blablabla - >>> - >>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file') + .. code:: python + + # The badly named method 'to_file' is replaced by 'write_file' + class Clip: + def write_file(self, some args): + # blablabla + Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file') """ # Detect new name of func new_name = func.__name__ diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 4bc3ed476..3e04463e5 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -326,10 +326,12 @@ def write_videofile( Examples -------- - >>> from moviepy import VideoFileClip - >>> clip = VideoFileClip("myvideo.mp4").subclipped(100,120) - >>> clip.write_videofile("my_new_video.mp4") - >>> clip.close() + .. code:: python + + from moviepy import VideoFileClip + clip = VideoFileClip("myvideo.mp4").subclipped(100,120) + clip.write_videofile("my_new_video.mp4") + clip.close() """ name, ext = os.path.splitext(os.path.basename(filename)) @@ -500,10 +502,12 @@ def write_gif( The gif will be playing the clip in real time (you can only change the frame rate). If you want the gif to be played - slower than the clip you will use :: + slower than the clip you will use + + .. code:: python - >>> # slow down clip 50% and make it a gif - >>> myClip.multiply_speed(0.5).to_gif('myClip.gif') + # slow down clip 50% and make it a gif + myClip.multiply_speed(0.5).to_gif('myClip.gif') """ # A little sketchy at the moment, maybe move all that in write_gif, @@ -538,10 +542,12 @@ def show(self, t=0, with_mask=True): Examples -------- - >>> from moviepy import * - >>> - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.show(t=4) + .. code:: python + + from moviepy import * + + clip = VideoFileClip("media/chaplin.mp4") + clip.show(t=4) """ clip = self.copy() @@ -592,9 +598,11 @@ def preview( Examples -------- - >>> from moviepy import * - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.preview(fps=10, audio=False) + .. code:: python + + from moviepy import * + clip = VideoFileClip("media/chaplin.mp4") + clip.preview(fps=10, audio=False) """ audio = audio and (self.audio is not None) audio_flag = None @@ -641,9 +649,11 @@ def with_effects_on_subclip( Examples -------- - >>> # The scene between times t=3s and t=6s in ``clip`` will be - >>> # be played twice slower in ``new_clip`` - >>> new_clip = clip.with_sub_effect(MultiplySpeed(0.5), 3, 6) + .. 
code:: python + + # The scene between times t=3s and t=6s in ``clip`` will be + # be played twice slower in ``new_clip`` + new_clip = clip.with_sub_effect(MultiplySpeed(0.5), 3, 6) """ left = None if (start_time == 0) else self.subclipped(0, start_time) @@ -892,16 +902,18 @@ def with_position(self, pos, relative=False): Examples -------- - >>> clip.with_position((45,150)) # x=45, y=150 - >>> - >>> # clip horizontally centered, at the top of the picture - >>> clip.with_position(("center","top")) - >>> - >>> # clip is at 40% of the width, 70% of the height: - >>> clip.with_position((0.4,0.7), relative=True) - >>> - >>> # clip's position is horizontally centered, and moving up ! - >>> clip.with_position(lambda t: ('center', 50+t) ) + .. code:: python + + clip.with_position((45,150)) # x=45, y=150 + + # clip horizontally centered, at the top of the picture + clip.with_position(("center","top")) + + # clip is at 40% of the width, 70% of the height: + clip.with_position((0.4,0.7), relative=True) + + # clip's position is horizontally centered, and moving up ! + clip.with_position(lambda t: ('center', 50+t)) """ self.relative_pos = relative @@ -1137,10 +1149,12 @@ class UpdatedVideoClip(VideoClip): UpdatedVideoClips have the following frame_function: - >>> def frame_function(t): - >>> while self.world.clip_t < t: - >>> world.update() # updates, and increases world.clip_t - >>> return world.to_frame() + .. code:: python + + def frame_function(t): + while self.world.clip_t < t: + world.update() # updates, and increases world.clip_t + return world.to_frame() Parameters ---------- diff --git a/moviepy/video/fx/MasksAnd.py b/moviepy/video/fx/MasksAnd.py index a67d8d271..c81a33d62 100644 --- a/moviepy/video/fx/MasksAnd.py +++ b/moviepy/video/fx/MasksAnd.py @@ -23,11 +23,13 @@ class MasksAnd(Effect): Examples -------- - >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red - >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green - >>> masked_clip = clip.with_effects([vfx.MasksAnd(mask)]) # black - >>> masked_clip.get_frame(0) - [[[0 0 0]]] + .. code:: python + + clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red + mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green + masked_clip = clip.with_effects([vfx.MasksAnd(mask)]) # black + masked_clip.get_frame(0) + [[[0 0 0]]] """ other_clip: Union[Clip, np.ndarray] diff --git a/moviepy/video/fx/MasksOr.py b/moviepy/video/fx/MasksOr.py index 7d215c4e1..7ec6d2eec 100644 --- a/moviepy/video/fx/MasksOr.py +++ b/moviepy/video/fx/MasksOr.py @@ -23,11 +23,13 @@ class MasksOr(Effect): Examples -------- - >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red - >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green - >>> masked_clip = clip.with_effects([vfx.MasksOr(mask)]) # yellow - >>> masked_clip.get_frame(0) - [[[255 255 0]]] + .. 
code:: python + + clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red + mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green + masked_clip = clip.with_effects([vfx.MasksOr(mask)]) # yellow + masked_clip.get_frame(0) + [[[255 255 0]]] """ other_clip: Union[Clip, np.ndarray] diff --git a/moviepy/video/fx/Resize.py b/moviepy/video/fx/Resize.py index 3f1ddfa76..44b857d39 100644 --- a/moviepy/video/fx/Resize.py +++ b/moviepy/video/fx/Resize.py @@ -32,10 +32,12 @@ class Resize(Effect): Examples -------- - >>> myClip.with_effects([vfx.Resize((460,720))]) # New resolution: (460,720) - >>> myClip.with_effects([vfx.Resize(0.6)]) # width and height multiplied by 0.6 - >>> myClip.with_effects([vfx.Resize(width=800)]) # height computed automatically. - >>> myClip.with_effects([vfx.Resize(lambda t : 1+0.02*t)]) # slow clip swelling + .. code:: python + + clip.with_effects([vfx.Resize((460,720))]) # New resolution: (460,720) + clip.with_effects([vfx.Resize(0.6)]) # width and height multiplied by 0.6 + clip.with_effects([vfx.Resize(width=800)]) # height computed automatically. + clip.with_effects([vfx.Resize(lambda t : 1+0.02*t)]) # slow clip swelling """ new_size: Union[tuple, float, callable] = None diff --git a/moviepy/video/fx/SlideIn.py b/moviepy/video/fx/SlideIn.py index 56277e097..7821d97b9 100644 --- a/moviepy/video/fx/SlideIn.py +++ b/moviepy/video/fx/SlideIn.py @@ -27,19 +27,21 @@ class SlideIn(Effect): Examples -------- - >>> from moviepy import * - >>> - >>> clips = [... make a list of clips] - >>> slided_clips = [ - ... CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "left")])]) - ... for clip in clips - ... ] - >>> final_clip = concatenate_videoclips(slided_clips, padding=-1) - >>> - >>> clip = ColorClip( - ... color=(255, 0, 0), duration=1, size=(300, 300) - ... ).with_fps(60) - >>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "right")])]) + .. code:: python + + from moviepy import * + + clips = [... make a list of clips] + slided_clips = [ + CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "left")])]) + for clip in clips + ] + final_clip = concatenate_videoclips(slided_clips, padding=-1) + + clip = ColorClip( + color=(255, 0, 0), duration=1, size=(300, 300) + ).with_fps(60) + final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "right")])]) """ duration: float diff --git a/moviepy/video/fx/SlideOut.py b/moviepy/video/fx/SlideOut.py index 49e4c2a44..a52ec6d49 100644 --- a/moviepy/video/fx/SlideOut.py +++ b/moviepy/video/fx/SlideOut.py @@ -27,19 +27,21 @@ class SlideOut(Effect): Examples -------- - >>> from moviepy import * - >>> - >>> clips = [... make a list of clips] - >>> slided_clips = [ - ... CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "left")])]) - ... for clip in clips - ... ] - >>> final_clip = concatenate_videoclips(slided_clips, padding=-1) - >>> - >>> clip = ColorClip( - ... color=(255, 0, 0), duration=1, size=(300, 300) - ... ).with_fps(60) - >>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "right")])]) + .. code:: python + + from moviepy import * + + clips = [... 
make a list of clips] + slided_clips = [ + CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "left")])]) + for clip in clips + ] + final_clip = concatenate_videoclips(slided_clips, padding=-1) + + clip = ColorClip( + color=(255, 0, 0), duration=1, size=(300, 300) + ).with_fps(60) + final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "right")])]) """ duration: float diff --git a/moviepy/video/io/VideoFileClip.py b/moviepy/video/io/VideoFileClip.py index e8486b38b..67ec8cf5a 100644 --- a/moviepy/video/io/VideoFileClip.py +++ b/moviepy/video/io/VideoFileClip.py @@ -7,13 +7,14 @@ class VideoFileClip(VideoClip): - """ - A video clip originating from a movie file. For instance: :: + """A video clip originating from a movie file. For instance: + + .. code:: python - >>> clip = VideoFileClip("myHolidays.mp4") - >>> clip.close() - >>> with VideoFileClip("myMaskVideo.avi") as clip2: - >>> pass # Implicit close called by context manager. + clip = VideoFileClip("myHolidays.mp4") + clip.close() + with VideoFileClip("myMaskVideo.avi") as clip2: + pass # Implicit close called by context manager. Parameters @@ -81,7 +82,6 @@ class VideoFileClip(VideoClip): If copies are made, and close() is called on one, it may cause methods on the other copies to fail. - """ @convert_path_to_string("filename") diff --git a/moviepy/video/io/display_in_notebook.py b/moviepy/video/io/display_in_notebook.py index 156719fcd..fe0776a76 100644 --- a/moviepy/video/io/display_in_notebook.py +++ b/moviepy/video/io/display_in_notebook.py @@ -88,17 +88,18 @@ def html_embed( Examples -------- + .. code:: python - >>> from moviepy import * - >>> # later ... - >>> html_embed(clip, width=360) - >>> html_embed(clip.audio) + from moviepy import * + # later ... + html_embed(clip, width=360) + html_embed(clip.audio) - >>> clip.write_gif("test.gif") - >>> html_embed('test.gif') + clip.write_gif("test.gif") + html_embed('test.gif') - >>> clip.save_frame("first_frame.jpeg") - >>> html_embed("first_frame.jpeg") + clip.save_frame("first_frame.jpeg") + html_embed("first_frame.jpeg") """ if rd_kwargs is None: # pragma: no cover rd_kwargs = {} @@ -246,16 +247,18 @@ def display_in_notebook( Examples -------- - >>> from moviepy import * - >>> # later ... - >>> clip.display_in_notebook(width=360) - >>> clip.audio.display_in_notebook() + .. code:: python - >>> clip.write_gif("test.gif") - >>> display_in_notebook('test.gif') + from moviepy import * + # later ... 
+ clip.display_in_notebook(width=360) + clip.audio.display_in_notebook() - >>> clip.write_gif("test.gif") - >>> display_in_notebook('test.gif') + clip.write_gif("test.gif") + display_in_notebook('test.gif') - >>> clip.save_frame("first_frame.jpeg") - >>> display_in_notebook("first_frame.jpeg") + clip.save_frame("first_frame.jpeg") + display_in_notebook("first_frame.jpeg") """ if not ipython_available: raise ImportError("Only works inside an Jupyter Notebook") diff --git a/moviepy/video/tools/credits.py b/moviepy/video/tools/credits.py index 5248bfc9f..8caec365d 100644 --- a/moviepy/video/tools/credits.py +++ b/moviepy/video/tools/credits.py @@ -16,22 +16,23 @@ class CreditsClip(TextClip): creditfile A string or path like object pointing to a text file - whose content must be as follows: :: + whose content must be as follows: - # This is a comment - # The next line says : leave 4 blank lines - .blank 4 + .. code:: text - ..Executive Story Editor - MARCEL DURAND + # This is a comment + # The next line says : leave 4 blank lines + .blank 4 - ..Associate Producers - MARTIN MARCEL - DIDIER MARTIN + ..Executive Story Editor + MARCEL DURAND - ..Music Supervisor - JEAN DIDIER + ..Associate Producers + MARTIN MARCEL + DIDIER MARTIN + ..Music Supervisor + JEAN DIDIER width Total width of the credits text in pixels diff --git a/moviepy/video/tools/cuts.py b/moviepy/video/tools/cuts.py index 6c7f30bb0..ba38a5f40 100644 --- a/moviepy/video/tools/cuts.py +++ b/moviepy/video/tools/cuts.py @@ -28,12 +28,14 @@ def find_video_period(clip, fps=None, start_time=0.3): Examples -------- - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import find_video_period - >>> - >>> clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).loop(2) - >>> round(videotools.find_video_period(clip, fps=80), 6) - 1 + .. code:: python + + from moviepy import * + from moviepy.video.tools.cuts import find_video_period + + clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).loop(2) + round(videotools.find_video_period(clip, fps=80), 6) + 1 """ def frame(t): @@ -154,8 +156,10 @@ def filter(self, condition): Examples -------- - >>> # Only keep the matches corresponding to (> 1 second) sequences. - >>> new_matches = matches.filter( lambda match: match.time_span > 1) + .. code:: python + + # Only keep the matches corresponding to (> 1 second) sequences. + new_matches = matches.filter(lambda match: match.time_span > 1) """ return FramesMatches(filter(condition, self)) @@ -229,15 +233,17 @@ def from_clip(clip, distance_threshold, max_duration, fps=None, logger="bar"): We find all matching frames in a given video and turn the best match with a duration of 1.5 seconds or more into a GIF: - >>> from moviepy import VideoFileClip - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> clip = VideoFileClip("foo.mp4").resize(width=200) - >>> matches = FramesMatches.from_clip( - ... clip, distance_threshold=10, max_duration=3, # will take time - ... ) - >>> best = matches.filter(lambda m: m.time_span > 1.5).best() - >>> clip.subclipped(best.start_time, best.end_time).write_gif("foo.gif") + .. 
code:: python + + from moviepy import VideoFileClip + from moviepy.video.tools.cuts import FramesMatches + + clip = VideoFileClip("foo.mp4").resize(width=200) + matches = FramesMatches.from_clip( + clip, distance_threshold=10, max_duration=3, # will take time + ) + best = matches.filter(lambda m: m.time_span > 1.5).best() + clip.subclipped(best.start_time, best.end_time).write_gif("foo.gif") """ N_pixels = clip.w * clip.h * 3 @@ -338,22 +344,24 @@ def select_scenes( Examples -------- - >>> from pprint import pprint - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) - >>> mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip] - >>> clip = concatenate_videoclips(mirror_and_clip) - >>> - >>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes( - ... 1, 2, nomatch_threshold=0, - ... ) - >>> print(result) - [(1.0000, 4.0000, 0.0000, 0.0000), - (1.1600, 3.8400, 0.0000, 0.0000), - (1.2800, 3.7200, 0.0000, 0.0000), - (1.4000, 3.6000, 0.0000, 0.0000)] + .. code:: python + + from pprint import pprint + from moviepy import * + from moviepy.video.tools.cuts import FramesMatches + + ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) + mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip] + clip = concatenate_videoclips(mirror_and_clip) + + result = FramesMatches.from_clip(clip, 10, 3).select_scenes( + 1, 2, nomatch_threshold=0, + ) + print(result) + # [(1.0000, 4.0000, 0.0000, 0.0000), + # (1.1600, 3.8400, 0.0000, 0.0000), + # (1.2800, 3.7200, 0.0000, 0.0000), + # (1.4000, 3.6000, 0.0000, 0.0000)] """ if nomatch_threshold is None: nomatch_threshold = match_threshold @@ -425,24 +433,26 @@ def write_gifs(self, clip, gifs_dir, **kwargs): Examples -------- - >>> import os - >>> from pprint import pprint - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) - >>> clip = concatenate_videoclips([ch_clip.time_mirror(), ch_clip]) - >>> - >>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes( - ... 1, 2, nomatch_threshold=0, - ... ) - >>> - >>> os.mkdir("foo") - >>> result.write_gifs(clip, "foo") - MoviePy - Building file foo/00000100_00000400.gif with imageio. - MoviePy - Building file foo/00000115_00000384.gif with imageio. - MoviePy - Building file foo/00000128_00000372.gif with imageio. - MoviePy - Building file foo/00000140_00000360.gif with imageio. + .. code:: python + + import os + from pprint import pprint + from moviepy import * + from moviepy.video.tools.cuts import FramesMatches + + ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) + clip = concatenate_videoclips([ch_clip.time_mirror(), ch_clip]) + + result = FramesMatches.from_clip(clip, 10, 3).select_scenes( + 1, 2, nomatch_threshold=0, + ) + + os.mkdir("foo") + result.write_gifs(clip, "foo") + # MoviePy - Building file foo/00000100_00000400.gif with imageio. + # MoviePy - Building file foo/00000115_00000384.gif with imageio. + # MoviePy - Building file foo/00000128_00000372.gif with imageio. + # MoviePy - Building file foo/00000140_00000360.gif with imageio. 
""" for start, end, _, _ in self: name = "%s/%08d_%08d.gif" % (gifs_dir, 100 * start, 100 * end) diff --git a/moviepy/video/tools/drawing.py b/moviepy/video/tools/drawing.py index 070bf4855..77b68b9ba 100644 --- a/moviepy/video/tools/drawing.py +++ b/moviepy/video/tools/drawing.py @@ -100,26 +100,27 @@ def color_gradient( Examples -------- - >>> color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black - [[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]] - >>> - >>> color_gradient( # from red to green - ... (10, 1), # size - ... (0, 0), # p1 - ... p2=(10, 0), - ... color_1=(255, 0, 0), # red - ... color_2=(0, 255, 0), # green - ... ) - [[[ 0. 255. 0. ] - [ 25.5 229.5 0. ] - [ 51. 204. 0. ] - [ 76.5 178.5 0. ] - [102. 153. 0. ] - [127.5 127.5 0. ] - [153. 102. 0. ] - [178.5 76.5 0. ] - [204. 51. 0. ] - [229.5 25.5 0. ]]] + .. code:: python + + color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black + #[[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]] + # from red to green + color_gradient( + (10, 1), (0, 0), + p2=(10, 0), + color_1=(255, 0, 0), + color_2=(0, 255, 0) + ) + # [[[ 0. 255. 0. ] + # [ 25.5 229.5 0. ] + # [ 51. 204. 0. ] + # [ 76.5 178.5 0. ] + # [102. 153. 0. ] + # [127.5 127.5 0. ] + # [153. 102. 0. ] + # [178.5 76.5 0. ] + # [204. 51. 0. ] + # [229.5 25.5 0. ]]] """ # np-arrayize and change x,y coordinates to y,x w, h = size @@ -234,16 +235,18 @@ def color_split( Examples -------- - >>> size = [200, 200] - >>> - >>> # an image with all pixels with x<50 =0, the others =1 - >>> color_split(size, x=50, color_1=0, color_2=1) - >>> - >>> # an image with all pixels with y<50 red, the others green - >>> color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0]) - >>> - >>> # An image split along an arbitrary line (see below) - >>> color_split(size, p1=[20, 50], p2=[25, 70] color_1=0, color_2=1) + .. code:: python + + size = [200, 200] + + # an image with all pixels with x<50 =0, the others =1 + color_split(size, x=50, color_1=0, color_2=1) + + # an image with all pixels with y<50 red, the others green + color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0]) + + # An image split along an arbitrary line (see below) + color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1) """ if gradient_width or ((x is None) and (y is None)): if p2 is not None: @@ -304,18 +307,20 @@ def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1): Examples -------- - >>> from moviepy.video.tools.drawing import circle - >>> - >>> circle( - ... (5, 5), # size - ... (2, 2), # center - ... 2, # radius - ... ) - array([[0. , 0. , 0. , 0. , 0. ], - [0. , 0.58578644, 1. , 0.58578644, 0. ], - [0. , 1. , 1. , 1. , 0. ], - [0. , 0.58578644, 1. , 0.58578644, 0. ], - [0. , 0. , 0. , 0. , 0. ]]) + .. code:: python + + from moviepy.video.tools.drawing import circle + + circle( + (5, 5), # size + (2, 2), # center + 2, # radius + ) + # array([[0. , 0. , 0. , 0. , 0. ], + # [0. , 0.58578644, 1. , 0.58578644, 0. ], + # [0. , 1. , 1. , 1. , 0. ], + # [0. , 0.58578644, 1. , 0.58578644, 0. ], + # [0. , 0. , 0. , 0. , 0. 
     """
     offset = 1.0 * (radius - blur) / radius if radius else 0
     return color_gradient(
diff --git a/moviepy/video/tools/interpolators.py b/moviepy/video/tools/interpolators.py
index 268ad3a64..2e16144f0 100644
--- a/moviepy/video/tools/interpolators.py
+++ b/moviepy/video/tools/interpolators.py
@@ -30,11 +30,13 @@ class Interpolator:
     Examples
     --------
-    >>> # instantiate using `tt` and `ss`
-    >>> interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5])
-    >>>
-    >>> # instantiate using `ttss`
-    >>> interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]])  # [t, value]
+    .. code:: python
+
+        # instantiate using `tt` and `ss`
+        interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5])
+
+        # instantiate using `ttss`
+        interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]])  # [t, value]
     """

     def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None):
diff --git a/moviepy/video/tools/subtitles.py b/moviepy/video/tools/subtitles.py
index 856ddb12a..b240f2de4 100644
--- a/moviepy/video/tools/subtitles.py
+++ b/moviepy/video/tools/subtitles.py
@@ -40,15 +40,17 @@ class SubtitlesClip(VideoClip):
     Examples
     --------
-    >>> from moviepy.video.tools.subtitles import SubtitlesClip
-    >>> from moviepy.video.io.VideoFileClip import VideoFileClip
-    >>> generator = lambda text: TextClip(text, font='Georgia-Regular',
-    ...                                   font_size=24, color='white')
-    >>> sub = SubtitlesClip("subtitles.srt", generator)
-    >>> sub = SubtitlesClip("subtitles.srt", generator, encoding='utf-8')
-    >>> myvideo = VideoFileClip("myvideo.avi")
-    >>> final = CompositeVideoClip([clip, subtitles])
-    >>> final.write_videofile("final.mp4", fps=myvideo.fps)
+    .. code:: python
+
+        from moviepy import CompositeVideoClip, TextClip
+        from moviepy.video.tools.subtitles import SubtitlesClip
+        from moviepy.video.io.VideoFileClip import VideoFileClip
+
+        # Turn each subtitle line into a styled TextClip
+        generator = lambda text: TextClip(
+            font="Georgia-Regular.ttf", text=text, font_size=24, color="white"
+        )
+        sub = SubtitlesClip("subtitles.srt", make_textclip=generator)
+        sub = SubtitlesClip("subtitles.srt", make_textclip=generator, encoding="utf-8")
+
+        myvideo = VideoFileClip("myvideo.avi")
+        final = CompositeVideoClip([myvideo, sub])
+        final.write_videofile("final.mp4", fps=myvideo.fps)
     """

diff --git a/tests/test_tools.py b/tests/test_tools.py
index 1a0c477bb..0e4e5807c 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -90,7 +90,7 @@ def to_file(*args, **kwargs):
     with pytest.warns(PendingDeprecationWarning) as record:
         func(1, b=2)

-    assert len(record) == 1
+    assert len(record) > 0
     assert record[0].message.args[0] == expected_warning_message