From 9fd857d2714f4e4cca61d33c955c4421f8e3f95e Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 13:29:50 -0500 Subject: [PATCH 01/12] more api changes --- README.md | 4 +- .../moviepy_10_minutes/trailer.py | 18 ++-- .../quick_presentation/basic_example.py | 4 +- .../compositing/CompositeAudioClip.py | 2 +- .../compositing/CompositeVideoClip.py | 2 +- .../user_guide/compositing/concatenate.py | 2 +- .../user_guide/compositing/crossfadein.py | 2 +- .../user_guide/compositing/with_position.py | 4 +- .../code/user_guide/compositing/with_start.py | 2 +- .../user_guide/effects/modify_copy_example.py | 6 +- .../code/user_guide/loading/AudioArrayClip.py | 4 +- .../code/user_guide/loading/AudioClip.py | 4 +- .../code/user_guide/loading/DataVideoClip.py | 4 +- .../code/user_guide/loading/VideoClip.py | 4 +- .../code/user_guide/loading/VideoFileClip.py | 2 +- .../code/user_guide/loading/loading.py | 12 +-- .../code/user_guide/rendering/preview.py | 2 +- .../code/user_guide/rendering/write_gif.py | 2 +- .../user_guide/rendering/write_videofile.py | 2 +- docs/getting_started/moviepy_10_minutes.rst | 6 +- docs/user_guide/compositing.rst | 2 +- docs/user_guide/loading.rst | 8 +- docs/user_guide/modifying.rst | 4 +- docs/user_guide/rendering.rst | 2 +- moviepy/Clip.py | 30 +++--- moviepy/audio/AudioClip.py | 24 ++--- moviepy/audio/fx/AudioDelay.py | 4 +- moviepy/audio/io/AudioFileClip.py | 2 +- moviepy/decorators.py | 2 +- moviepy/video/VideoClip.py | 94 +++++++++---------- .../video/compositing/CompositeVideoClip.py | 12 +-- moviepy/video/fx/Blink.py | 2 +- moviepy/video/fx/CrossFadeIn.py | 2 +- moviepy/video/fx/CrossFadeOut.py | 2 +- moviepy/video/fx/MakeLoopable.py | 2 +- moviepy/video/fx/Margin.py | 2 +- moviepy/video/fx/Rotate.py | 2 +- moviepy/video/io/ImageSequenceClip.py | 20 ++-- moviepy/video/io/VideoFileClip.py | 8 +- moviepy/video/tools/cuts.py | 10 +- moviepy/video/tools/subtitles.py | 4 +- tests/conftest.py | 2 +- tests/test_AudioClips.py | 6 +- 
tests/test_Clip.py | 6 +- tests/test_SubtitlesClip.py | 2 +- tests/test_VideoClip.py | 28 +++--- tests/test_VideoFileClip.py | 2 +- tests/test_compositing.py | 2 +- tests/test_ffmpeg_reader.py | 2 +- tests/test_fx.py | 34 +++---- tests/test_issues.py | 14 +-- tests/test_videotools.py | 12 +-- 52 files changed, 214 insertions(+), 222 deletions(-) diff --git a/README.md b/README.md index de2b2f75a..2db51c8ad 100644 --- a/README.md +++ b/README.md @@ -24,10 +24,10 @@ result to a new file: from moviepy import * # Load file example.mp4 and keep only the subclip from 00:00:10 to 00:00:20 -clip = VideoFileClip("long_examples/example2.mp4").with_subclip(10, 20) +clip = VideoFileClip("long_examples/example2.mp4").subclipped(10, 20) # Reduce the audio volume to 80% of its original volume -clip = clip.with_multiply_volume(0.8) +clip = clip.with_volume_scaled(0.8) # Generate a text clip. You can customize the font, color, etc. txt_clip = TextClip(font="example.ttf", text="Big Buck Bunny", font_size=70, color='white') diff --git a/docs/_static/code/getting_started/moviepy_10_minutes/trailer.py b/docs/_static/code/getting_started/moviepy_10_minutes/trailer.py index f530c383a..a914f8950 100644 --- a/docs/_static/code/getting_started/moviepy_10_minutes/trailer.py +++ b/docs/_static/code/getting_started/moviepy_10_minutes/trailer.py @@ -16,13 +16,13 @@ # We extract the scenes we want to use # First the characters -intro_clip = video.with_subclip(1, 11) -bird_clip = video.with_subclip(16, 20) -bunny_clip = video.with_subclip(37, 55) -rodents_clip = video.with_subclip( +intro_clip = video.subclipped(1, 11) +bird_clip = video.subclipped(16, 20) +bunny_clip = video.subclipped(37, 55) +rodents_clip = video.subclipped( "00:03:34.75", "00:03:56" ) # we can also use string notation with format HH:MM:SS.uS -rambo_clip = video.with_subclip("04:41.5", "04:44.70") +rambo_clip = video.subclipped("04:41.5", "04:44.70") ##################### @@ -43,8 +43,8 @@ ############################## # 
Well, looking at the rodent scene it is a bit long isn't? # Let's see how we modify the clip with one of the many clip manipulation method starting by with_* -# in that case by removing of the clip the part between 00:06:00 to 00:10:00 of the clip, using with_cutout -rodents_clip = rodents_clip.with_cutout(start_time=4, end_time=10) +# in that case by removing of the clip the part between 00:06:00 to 00:10:00 of the clip, using with_section_cut_out +rodents_clip = rodents_clip.with_section_cut_out(start_time=4, end_time=10) # Note: You may have noticed that we have reassign rodents_clip, this is because all with_* methods return a modified *copy* of the # original clip instead of modifying it directly. In MoviePy any function starting by with_* is out-place instead of in-place @@ -210,8 +210,8 @@ # Effects are not only for transition, they can also change a clip timing or apparence # To show that, lets also modify the Rambo-like part of our clip to be in slow motion -# PS : We do it for effect, but this is one of the few effects that have a direct shortcut, with_multiply_speed -# the others are with_multiply_volume, resized, croped and rotated +# PS : We do it for effect, but this is one of the few effects that have a direct shortcut, with_speed_scaled +# the others are with_volume_scaled, resized, croped and rotated rambo_clip = rambo_clip.with_effects([vfx.MultiplySpeed(0.5)]) # Because we modified timing of rambo_clip with our MultiplySpeed effect, we must re-assign the following clips timing diff --git a/docs/_static/code/getting_started/quick_presentation/basic_example.py b/docs/_static/code/getting_started/quick_presentation/basic_example.py index 2a32d0ec8..db2316245 100644 --- a/docs/_static/code/getting_started/quick_presentation/basic_example.py +++ b/docs/_static/code/getting_started/quick_presentation/basic_example.py @@ -2,10 +2,10 @@ from moviepy import * # Load file example.mp4 and extract only the subclip from 00:00:10 to 00:00:20 -clip = 
VideoFileClip("long_examples/example2.mp4").with_subclip(10, 20) +clip = VideoFileClip("long_examples/example2.mp4").subclipped(10, 20) # Reduce the audio volume to 80% of his original volume -clip = clip.with_multiply_volume(0.8) +clip = clip.with_volume_scaled(0.8) # Generate a text clip. You can customize the font, color, etc. txt_clip = TextClip( diff --git a/docs/_static/code/user_guide/compositing/CompositeAudioClip.py b/docs/_static/code/user_guide/compositing/CompositeAudioClip.py index a810bfcf1..121dc4b24 100644 --- a/docs/_static/code/user_guide/compositing/CompositeAudioClip.py +++ b/docs/_static/code/user_guide/compositing/CompositeAudioClip.py @@ -11,7 +11,7 @@ # We will play aclip1, then ontop of it aclip2 after 5s, and the aclip3 on top of both after 9s compo = CompositeAudioClip( [ - aclip1.with_multiply_volume(1.2), + aclip1.with_volume_scaled(1.2), aclip2.with_start(5), # start at t=5s aclip3.with_start(9), ] diff --git a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py index 1c1785506..5da6b5181 100644 --- a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py +++ b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py @@ -2,7 +2,7 @@ # We load all the clips we want to compose clip1 = VideoFileClip("example.mp4") -clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1) +clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) clip3 = VideoFileClip("example3.mp4") # We concatenate them and write theme stacked on top of each other, with clip3 over clip2 over clip1 diff --git a/docs/_static/code/user_guide/compositing/concatenate.py b/docs/_static/code/user_guide/compositing/concatenate.py index edc99137c..5bdaf0252 100644 --- a/docs/_static/code/user_guide/compositing/concatenate.py +++ b/docs/_static/code/user_guide/compositing/concatenate.py @@ -2,7 +2,7 @@ # We load all the clips we want to concatenate clip1 = VideoFileClip("example.mp4") -clip2 = 
VideoFileClip("example2.mp4").with_subclip(0, 1) +clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) clip3 = VideoFileClip("example3.mp4") # We concatenate them and write the result diff --git a/docs/_static/code/user_guide/compositing/crossfadein.py b/docs/_static/code/user_guide/compositing/crossfadein.py index ffdae5d2e..3127e1832 100644 --- a/docs/_static/code/user_guide/compositing/crossfadein.py +++ b/docs/_static/code/user_guide/compositing/crossfadein.py @@ -2,7 +2,7 @@ # We load all the clips we want to compose clip1 = VideoFileClip("example.mp4") -clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1) +clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) # Clip2 will be on top of clip1 for 1s clip1 = clip1.with_end(2) diff --git a/docs/_static/code/user_guide/compositing/with_position.py b/docs/_static/code/user_guide/compositing/with_position.py index bd801d3f5..b020db854 100644 --- a/docs/_static/code/user_guide/compositing/with_position.py +++ b/docs/_static/code/user_guide/compositing/with_position.py @@ -1,7 +1,7 @@ from moviepy import * # We load all the clips we want to compose -background = VideoFileClip("example2.mp4").with_subclip(0, 2) +background = VideoFileClip("example2.mp4").subclipped(0, 2) title = TextClip( "./example.ttf", text="Big Buck Bunny", @@ -42,7 +42,7 @@ copyright = copyright.with_position(("center", background.h - copyright.h - 30)) # Finally, we want the logo to be in the center, but to drop as time pass -# We can do so by setting position as a function that take time as argument, a lot like make_frame +# We can do so by setting position as a function that take time as argument, a lot like frame_function top = (background.h - logo.h) / 2 logo = logo.with_position(lambda t: ("center", top + t * 30)) diff --git a/docs/_static/code/user_guide/compositing/with_start.py b/docs/_static/code/user_guide/compositing/with_start.py index e59848da4..ae06bb59d 100644 --- a/docs/_static/code/user_guide/compositing/with_start.py +++ 
b/docs/_static/code/user_guide/compositing/with_start.py @@ -2,7 +2,7 @@ # We load all the clips we want to compose clip1 = VideoFileClip("example.mp4") -clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1) +clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) clip3 = VideoFileClip("example3.mp4") # We want to stop clip1 after 1s diff --git a/docs/_static/code/user_guide/effects/modify_copy_example.py b/docs/_static/code/user_guide/effects/modify_copy_example.py index 49595e949..d965b0a03 100644 --- a/docs/_static/code/user_guide/effects/modify_copy_example.py +++ b/docs/_static/code/user_guide/effects/modify_copy_example.py @@ -6,14 +6,14 @@ # This does nothing, as multiply_volume will return a copy of clip which you will loose immediatly as you dont store it # If you was to render clip now, the audio would still be at full volume -clip.with_multiply_volume(0.1) +clip.with_volume_scaled(0.1) # This create a copy of clip in clip_whisper with a volume of only 10% the original, but does not modify the original clip # If you was to render clip right now, the audio would still be at full volume # If you was to render clip_whisper, the audio would be a 10% of the original volume -clip_whisper = clip.with_multiply_volume(0.1) +clip_whisper = clip.with_volume_scaled(0.1) # This replace the original clip with a copy of it where volume is only 10% of the original # If you was to render clip now, the audio would be at 10% # The original clip is now lost -clip = clip.with_multiply_volume(0.1) +clip = clip.with_volume_scaled(0.1) diff --git a/docs/_static/code/user_guide/loading/AudioArrayClip.py b/docs/_static/code/user_guide/loading/AudioArrayClip.py index 1ed74ade4..6a02d41a0 100644 --- a/docs/_static/code/user_guide/loading/AudioArrayClip.py +++ b/docs/_static/code/user_guide/loading/AudioArrayClip.py @@ -12,7 +12,7 @@ total_size = note_size * len(notes) -def make_frame(t, note_frequency): +def frame_function(t, note_frequency): return np.sin(note_frequency * 2 * 
np.pi * t) @@ -24,7 +24,7 @@ def make_frame(t, note_frequency): i = 0 for note, frequency in notes.items(): for _ in range(note_size): - audio_array[i][0] = make_frame(times[i], frequency) + audio_array[i][0] = frame_function(times[i], frequency) i += 1 # Create an AudioArrayClip from the audio samples diff --git a/docs/_static/code/user_guide/loading/AudioClip.py b/docs/_static/code/user_guide/loading/AudioClip.py index 37f51b4c7..fe1785c8e 100644 --- a/docs/_static/code/user_guide/loading/AudioClip.py +++ b/docs/_static/code/user_guide/loading/AudioClip.py @@ -2,7 +2,7 @@ import numpy as np # Producing a sinewave of 440 Hz -> note A -make_frame_audio = lambda t: np.sin(440 * 2 * np.pi * t) +frame_function_audio = lambda t: np.sin(440 * 2 * np.pi * t) # AUDIO CLIPS -clip = AudioClip(make_frame_audio, duration=3) +clip = AudioClip(frame_function_audio, duration=3) diff --git a/docs/_static/code/user_guide/loading/DataVideoClip.py b/docs/_static/code/user_guide/loading/DataVideoClip.py index d44e04e49..3d59498f1 100644 --- a/docs/_static/code/user_guide/loading/DataVideoClip.py +++ b/docs/_static/code/user_guide/loading/DataVideoClip.py @@ -13,13 +13,13 @@ # The function make frame take data and create an image of 200x100 px fill with the color -def make_frame(data): +def frame_function(data): frame = np.full((100, 200, 3), data, dtype=np.uint8) return frame # We create the DataVideoClip, and we set FPS at 2, making a 3s clip (because len(dataset) = 6, so 6/2=3) -myclip = DataVideoClip(data=dataset, data_to_frame=make_frame, fps=2) +myclip = DataVideoClip(data=dataset, data_to_frame=frame_function, fps=2) # Modifying fps here will change video FPS, not clip FPS myclip.write_videofile("result.mp4", fps=30) diff --git a/docs/_static/code/user_guide/loading/VideoClip.py b/docs/_static/code/user_guide/loading/VideoClip.py index 6d5485306..2385eee3b 100644 --- a/docs/_static/code/user_guide/loading/VideoClip.py +++ b/docs/_static/code/user_guide/loading/VideoClip.py @@ 
-7,7 +7,7 @@ RED = (255, 0, 0) -def make_frame(t): +def frame_function(t): frequency = 1 # One pulse per second coef = 0.5 * (1 + math.sin(2 * math.pi * frequency * t)) # radius varies over time radius = WIDTH * coef @@ -25,7 +25,7 @@ def make_frame(t): clip = VideoClip( - make_frame, duration=2 + frame_function, duration=2 ) # we define a 2s duration for the clip to be able to render it later clip.write_gif( "circle.gif", fps=15 diff --git a/docs/_static/code/user_guide/loading/VideoFileClip.py b/docs/_static/code/user_guide/loading/VideoFileClip.py index f798b6872..2d24a8786 100644 --- a/docs/_static/code/user_guide/loading/VideoFileClip.py +++ b/docs/_static/code/user_guide/loading/VideoFileClip.py @@ -6,7 +6,7 @@ print("Clip duration: {}".format(myclip.duration)) print("Clip fps: {}".format(myclip.fps)) -myclip = myclip.with_subclip(0.5, 2) # Cutting the clip between 0.5 and 2 secs. +myclip = myclip.subclipped(0.5, 2) # Cutting the clip between 0.5 and 2 secs. print("Clip duration: {}".format(myclip.duration)) # Cuting will update duration print("Clip fps: {}".format(myclip.fps)) # and keep fps diff --git a/docs/_static/code/user_guide/loading/loading.py b/docs/_static/code/user_guide/loading/loading.py index 4eb9b7800..0166cb944 100644 --- a/docs/_static/code/user_guide/loading/loading.py +++ b/docs/_static/code/user_guide/loading/loading.py @@ -4,16 +4,16 @@ # Define some constants for later use black = (255, 255, 255) # RGB for black # Random noise image of 200x100 -make_frame = lambda t: np.random.randint(low=0, high=255, size=(100, 200, 3)) +frame_function = lambda t: np.random.randint(low=0, high=255, size=(100, 200, 3)) # A note by producing a sinewave of 440 Hz -make_frame_audio = lambda t: np.sin(440 * 2 * np.pi * t) +frame_function_audio = lambda t: np.sin(440 * 2 * np.pi * t) # Now lets see how to load different type of resources ! 
# VIDEO CLIPS` clip = VideoClip( - make_frame, duration=5 -) # for custom animations, where make_frame is a function returning an image as numpy array for a given time + frame_function, duration=5 +) # for custom animations, where frame_function is a function returning an image as numpy array for a given time clip = VideoFileClip("example.mp4") # for videos clip = ImageSequenceClip( "example_img_dir", fps=24 @@ -31,5 +31,5 @@ "example.wav" ) # for audio files, but also videos where you only want the keep the audio track clip = AudioClip( - make_frame_audio, duration=3 -) # for custom audio, where make_frame is a function returning a float (or tuple for stereo) for a given time + frame_function_audio, duration=3 +) # for custom audio, where frame_function is a function returning a float (or tuple for stereo) for a given time diff --git a/docs/_static/code/user_guide/rendering/preview.py b/docs/_static/code/user_guide/rendering/preview.py index be7ff92ff..a7de22308 100644 --- a/docs/_static/code/user_guide/rendering/preview.py +++ b/docs/_static/code/user_guide/rendering/preview.py @@ -1,6 +1,6 @@ from moviepy import * -myclip = VideoFileClip("./example.mp4").with_subclip(0, 1) # Keep only 0 to 1 sec +myclip = VideoFileClip("./example.mp4").subclipped(0, 1) # Keep only 0 to 1 sec # We preview our clip as a video, inheriting FPS and audio of the original clip myclip.preview() diff --git a/docs/_static/code/user_guide/rendering/write_gif.py b/docs/_static/code/user_guide/rendering/write_gif.py index 8f28aa4be..69580f7cd 100644 --- a/docs/_static/code/user_guide/rendering/write_gif.py +++ b/docs/_static/code/user_guide/rendering/write_gif.py @@ -1,6 +1,6 @@ from moviepy import * -myclip = VideoFileClip("example.mp4").with_subclip(0, 2) +myclip = VideoFileClip("example.mp4").subclipped(0, 2) # Here we just save as GIF myclip.write_gif("result.gif") diff --git a/docs/_static/code/user_guide/rendering/write_videofile.py 
b/docs/_static/code/user_guide/rendering/write_videofile.py index e42538a6b..9abc1ceea 100644 --- a/docs/_static/code/user_guide/rendering/write_videofile.py +++ b/docs/_static/code/user_guide/rendering/write_videofile.py @@ -1,7 +1,7 @@ from moviepy import * # We load all the clips we want to compose -background = VideoFileClip("long_examples/example2.mp4").with_subclip(0, 10) +background = VideoFileClip("long_examples/example2.mp4").subclipped(0, 10) title = TextClip( "./example.ttf", text="Big Buck Bunny", diff --git a/docs/getting_started/moviepy_10_minutes.rst b/docs/getting_started/moviepy_10_minutes.rst index 46298b784..033a37eb7 100644 --- a/docs/getting_started/moviepy_10_minutes.rst +++ b/docs/getting_started/moviepy_10_minutes.rst @@ -63,7 +63,7 @@ This is a very classic task, so let's turn our main clip into multiple subclips: :lines: 13-25 -Here, we use the ``with_subclip`` method to extract specific scenes from the main video. We provide the start and end times (in seconds or as text with the format ``HH:MM:SS.µS``) for each scene. +Here, we use the ``subclipped`` method to extract specific scenes from the main video. We provide the start and end times (in seconds or as text with the format ``HH:MM:SS.µS``) for each scene. The extracted clips are stored in their respective variables (``intro_clip``, ``bird_clip``, etc.). Step 3: Take a First Look with Preview @@ -93,13 +93,13 @@ Step 4: Modify a Clip by Cutting Out a Part of It -------------------------------------------------- After previewing the clips, we notice that the rodents' scene is a bit long. Let's modify the clip by removing a specific part. It would be nice to remove parts of the scene that we don't need. This is also quite a common task in video-editing. -To do so, we are going to use the ``with_cutout`` method to remove a portion of the clip between ``00:06:00`` to ``00:10:00``. 
+To do so, we are going to use the ``with_section_cut_out`` method to remove a portion of the clip between ``00:06:00`` to ``00:10:00``. .. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py :language: python :lines: 41-54 -In that particular case, we have used the ``with_cutout``, but this is only one of the many clip manipulation methods starting with ``with_*``. We will see a few others +In that particular case, we have used the ``with_section_cut_out``, but this is only one of the many clip manipulation methods starting with ``with_*``. We will see a few others in this tutorial, but we will miss a lot more. If you want an exhaustive list, go see :ref:`reference_manual`. .. note:: diff --git a/docs/user_guide/compositing.rst b/docs/user_guide/compositing.rst index 9f395800e..4078ed14e 100644 --- a/docs/user_guide/compositing.rst +++ b/docs/user_guide/compositing.rst @@ -89,7 +89,7 @@ So, considering that you would want to play ``clip1`` for the first 6 seconds, ` .. note:: When working with timing of your clip, you will frequently want to keep only parts of the original clip. - To do so, you should take a look at :py:meth:`~moviepy.Clip.Clip.with_subclip` and :py:meth:`~moviepy.Clip.Clip.with_cutout`. + To do so, you should take a look at :py:meth:`~moviepy.Clip.Clip.subclipped` and :py:meth:`~moviepy.Clip.Clip.with_section_cut_out`. Positioning clips diff --git a/docs/user_guide/loading.rst b/docs/user_guide/loading.rst index 2a77b6bb0..28bdb87d0 100644 --- a/docs/user_guide/loading.rst +++ b/docs/user_guide/loading.rst @@ -65,7 +65,7 @@ VideoClip """""""""" :py:class:`~moviepy.video.VideoClip.VideoClip` is the base class for all the other video clips in MoviePy. If all you want is to edit video files, you will never need it. This class is practical when you want to make animations from frames that are generated by another library. 
-All you need is to define a function ``make_frame(t)`` which returns a `HxWx3` numpy array (of 8-bits integers) representing the frame at time ``t``. +All you need is to define a function ``frame_function(t)`` which returns a `HxWx3` numpy array (of 8-bits integers) representing the frame at time ``t``. Here is an example where we will create a pulsating red circle with graphical library `pillow `_. @@ -81,7 +81,7 @@ Resulting in this. .. note:: - Clips that are made with a ``make_frame`` do not have an explicit frame rate nor duration by default, so you must provide duration at clip creation and a frame rate (``fps``, frames per second) for :py:meth:`~moviepy.video.VideoClip.VideoClip.write_gif` and :py:meth:`~moviepy.video.VideoClip.VideoClip.write_videofile`, and more generally for any methods that requires iterating through the frames. + Clips that are made with a ``frame_function`` do not have an explicit frame rate nor duration by default, so you must provide duration at clip creation and a frame rate (``fps``, frames per second) for :py:meth:`~moviepy.video.VideoClip.VideoClip.write_gif` and :py:meth:`~moviepy.video.VideoClip.VideoClip.write_videofile`, and more generally for any methods that requires iterating through the frames. For more, see :py:class:`~moviepy.video.VideoClip.VideoClip`. @@ -137,7 +137,7 @@ UpdatedVideoClip .. warning:: This is really advanced usage, you will probably never need it, if you do, please go read the code. -:py:class:`~moviepy.video.io.VideoClip.UpdatedVideoClip` is a video whose make_frame requires some objects to be updated before we can compute it. +:py:class:`~moviepy.video.io.VideoClip.UpdatedVideoClip` is a video whose frame_function requires some objects to be updated before we can compute it. This is particularly practical in science where some algorithm needs to make some steps before a new frame can be generated, or maybe when trying to make a video based on a live exterior context. 
@@ -244,7 +244,7 @@ AudioClip :py:class:`~moviepy.audio.AudioClip.AudioClip` is the base class for all audio clips. If all you want is to edit audio files, you will never need it. -All you need is to define a function ``make_frame(t)`` which returns a ``Nx1`` or ``Nx2`` numpy array representing the sound at time ``t``. +All you need is to define a function ``frame_function(t)`` which returns a ``Nx1`` or ``Nx2`` numpy array representing the sound at time ``t``. .. literalinclude:: /_static/code/user_guide/loading/AudioClip.py :language: python diff --git a/docs/user_guide/modifying.rst b/docs/user_guide/modifying.rst index be98470dc..23b857b1d 100644 --- a/docs/user_guide/modifying.rst +++ b/docs/user_guide/modifying.rst @@ -37,7 +37,7 @@ It means that creating a new clip is neither time nor memory hungry, all the com Time representations in MoviePy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Many methods that we will see accept duration or timepoint as arguments. For instance :py:meth:`clip.with_subclip(t_start, t_end) ` which cuts the clip between two timepoints. +Many methods that we will see accept duration or timepoint as arguments. For instance :py:meth:`clip.subclipped(t_start, t_end) ` which cuts the clip between two timepoints. MoviePy usually accept duration and timepoint as either : @@ -45,7 +45,7 @@ MoviePy usually accept duration and timepoint as either : * a ``tuple`` with ``(minutes, seconds)`` or ``(hours, minutes, seconds)``. * a ``string`` such as ``'00:03:50.54'``. -Also, you can usually provide negative times, indicating a time from the end of the clip. For example, ``clip.with_subclip(-20, -10)`` cuts the clip between 20s before the end and 10s before the end. +Also, you can usually provide negative times, indicating a time from the end of the clip. For example, ``clip.subclipped(-20, -10)`` cuts the clip between 20s before the end and 10s before the end. 
Modify a clip using the ``with_*`` methods diff --git a/docs/user_guide/rendering.rst b/docs/user_guide/rendering.rst index 535fd444b..52474c530 100644 --- a/docs/user_guide/rendering.rst +++ b/docs/user_guide/rendering.rst @@ -24,7 +24,7 @@ The first thing you can do is to preview your clip as a video, by calling method .. literalinclude:: /_static/code/user_guide/rendering/preview.py :language: python -You will probably frequently want to preview only a small portion of your clip, though ``preview`` do not offer such capabilities, you can easily emulate such behavior by using :py:meth:`~moviepy.Clip.Clip.with_subclip`. +You will probably frequently want to preview only a small portion of your clip, though ``preview`` do not offer such capabilities, you can easily emulate such behavior by using :py:meth:`~moviepy.Clip.Clip.subclipped`. .. note:: It is quite frequent for a clip preview to be out of sync, or to play slower than it should. It means that your computer is not powerful enough to render the clip in real time. diff --git a/moviepy/Clip.py b/moviepy/Clip.py index af0937b77..4bdd95839 100644 --- a/moviepy/Clip.py +++ b/moviepy/Clip.py @@ -79,12 +79,12 @@ def get_frame(self, t): if t == self.memoized_t: return self.memoized_frame else: - frame = self.make_frame(t) + frame = self.frame_function(t) self.memoized_t = t self.memoized_frame = frame return frame else: - return self.make_frame(t) + return self.frame_function(t) def transform(self, func, apply_to=None, keep_duration=True): """General processing of a clip. 
@@ -126,8 +126,8 @@ def transform(self, func, apply_to=None, keep_duration=True): if apply_to is None: apply_to = [] - # mf = copy(self.make_frame) - new_clip = self.with_make_frame(lambda t: func(self.get_frame, t)) + # mf = copy(self.frame_function) + new_clip = self.with_updated_frame_function(lambda t: func(self.get_frame, t)) if not keep_duration: new_clip.duration = None @@ -294,17 +294,17 @@ def with_duration(self, duration, change_end=True): self.start = self.end - duration @outplace - def with_make_frame(self, make_frame): - """Sets a ``make_frame`` attribute for the clip. Useful for setting + def with_updated_frame_function(self, frame_function): + """Sets a ``frame_function`` attribute for the clip. Useful for setting arbitrary/complicated videoclips. Parameters ---------- - make_frame : function + frame_function : function New frame creator function for the clip. """ - self.make_frame = make_frame + self.frame_function = frame_function def with_fps(self, fps, change_duration=False): """Returns a copy of the clip with a new default fps for functions like @@ -358,7 +358,7 @@ def with_memoize(self, memoize): @convert_parameter_to_seconds(["start_time", "end_time"]) @apply_to_mask @apply_to_audio - def with_subclip(self, start_time=0, end_time=None): + def subclipped(self, start_time=0, end_time=None): """Returns a clip playing the content of the current clip between times ``start_time`` and ``end_time``, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: @@ -384,7 +384,7 @@ def with_subclip(self, start_time=0, end_time=None): For instance: >>> # cut the last two seconds of the clip: - >>> new_clip = clip.with_subclip(0, -2) + >>> new_clip = clip.subclipped(0, -2) If ``end_time`` is provided or if the clip has a duration attribute, the duration of the returned clip is set automatically. 
@@ -426,7 +426,7 @@ def with_subclip(self, start_time=0, end_time=None): return new_clip @convert_parameter_to_seconds(["start_time", "end_time"]) - def with_cutout(self, start_time, end_time): + def with_section_cut_out(self, start_time, end_time): """ Returns a clip playing the content of the current clip but skips the extract between ``start_time`` and ``end_time``, which can be @@ -459,7 +459,7 @@ def with_cutout(self, start_time, end_time): else: # pragma: no cover return new_clip - def with_multiply_speed(self, factor: float = None, final_duration: float = None): + def with_speed_scaled(self, factor: float = None, final_duration: float = None): """Returns a clip playing the current clip but at a speed multiplied by ``factor``. For info on the parameters, please see ``vfx.MultiplySpeed``. """ @@ -469,7 +469,7 @@ def with_multiply_speed(self, factor: float = None, final_duration: float = None [MultiplySpeed(factor=factor, final_duration=final_duration)] ) - def with_multiply_volume(self, factor: float, start_time=None, end_time=None): + def with_volume_scaled(self, factor: float, start_time=None, end_time=None): """Returns a new clip with audio volume multiplied by the value `factor`. For info on the parameters, please see ``afx.MultiplyVolume`` """ @@ -608,7 +608,7 @@ def __getitem__(self, key): Simple slicing is implemented via `subclip`. So, ``clip[t_start:t_end]`` is equivalent to - ``clip.with_subclip(t_start, t_end)``. If ``t_start`` is not + ``clip.subclipped(t_start, t_end)``. If ``t_start`` is not given, default to ``0``, if ``t_end`` is not given, default to ``self.duration``. @@ -633,7 +633,7 @@ def __getitem__(self, key): if isinstance(key, slice): # support for [start:end:speed] slicing. If speed is negative # a time mirror is applied. 
- clip = self.with_subclip(key.start or 0, key.stop or self.duration) + clip = self.subclipped(key.start or 0, key.stop or self.duration) if key.step: # change speed of the subclip diff --git a/moviepy/audio/AudioClip.py b/moviepy/audio/AudioClip.py index 43e61c3c8..6c8ef7b94 100644 --- a/moviepy/audio/AudioClip.py +++ b/moviepy/audio/AudioClip.py @@ -22,7 +22,7 @@ class AudioClip(Clip): See ``AudioFileClip`` and ``CompositeAudioClip`` for usable classes. - An AudioClip is a Clip with a ``make_frame`` attribute of + An AudioClip is a Clip with a ``frame_function`` attribute of the form `` t -> [ f_t ]`` for mono sound and ``t-> [ f1_t, f2_t ]`` for stereo sound (the arrays are Numpy arrays). The `f_t` are floats between -1 and 1. These bounds can be @@ -32,7 +32,7 @@ class AudioClip(Clip): Parameters ---------- - make_frame + frame_function A function `t-> frame at time t`. The frame does not mean much for a sound, it is just a float. What 'makes' the sound are the variations of that float in the time. @@ -49,28 +49,28 @@ class AudioClip(Clip): >>> # Plays the note A in mono (a sine wave of frequency 440 Hz) >>> import numpy as np - >>> make_frame = lambda t: np.sin(440 * 2 * np.pi * t) - >>> clip = AudioClip(make_frame, duration=5, fps=44100) + >>> frame_function = lambda t: np.sin(440 * 2 * np.pi * t) + >>> clip = AudioClip(frame_function, duration=5, fps=44100) >>> clip.preview() >>> # Plays the note A in stereo (two sine waves of frequencies 440 and 880 Hz) - >>> make_frame = lambda t: np.array([ + >>> frame_function = lambda t: np.array([ ... np.sin(440 * 2 * np.pi * t), ... np.sin(880 * 2 * np.pi * t) ... 
]).T.copy(order="C") - >>> clip = AudioClip(make_frame, duration=3, fps=44100) + >>> clip = AudioClip(frame_function, duration=3, fps=44100) >>> clip.preview() """ - def __init__(self, make_frame=None, duration=None, fps=None): + def __init__(self, frame_function=None, duration=None, fps=None): super().__init__() if fps is not None: self.fps = fps - if make_frame is not None: - self.make_frame = make_frame + if frame_function is not None: + self.frame_function = frame_function frame0 = self.get_frame(0) if hasattr(frame0, "__iter__"): self.nchannels = len(list(frame0)) @@ -335,7 +335,7 @@ def __init__(self, array, fps): self.fps = fps self.duration = 1.0 * len(array) / fps - def make_frame(t): + def frame_function(t): """Complicated, but must be able to handle the case where t is a list of the form sin(t). """ @@ -352,7 +352,7 @@ def make_frame(t): else: return self.array[i] - self.make_frame = make_frame + self.frame_function = frame_function self.nchannels = len(list(self.get_frame(0))) @@ -400,7 +400,7 @@ def ends(self): """Returns ending times for all clips in the composition.""" return (clip.end for clip in self.clips) - def make_frame(self, t): + def frame_function(self, t): """Renders a frame for the composition for the time ``t``.""" played_parts = [clip.is_playing(t) for clip in self.clips] diff --git a/moviepy/audio/fx/AudioDelay.py b/moviepy/audio/fx/AudioDelay.py index 25c983177..2deca2268 100644 --- a/moviepy/audio/fx/AudioDelay.py +++ b/moviepy/audio/fx/AudioDelay.py @@ -40,10 +40,10 @@ class AudioDelay(Effect): ... ]) >>> # stereo A note - >>> make_frame = lambda t: np.array( + >>> frame_function = lambda t: np.array( ... [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)] ... ).T - ... clip = AudioClip(make_frame=make_frame, duration=0.1, fps=44100) + ... clip = AudioClip(frame_function=frame_function, duration=0.1, fps=44100) ... 
clip = clip.with_effects([afx.AudioDelay(offset=.2, n_repeats=11, decay=0)]) """ diff --git a/moviepy/audio/io/AudioFileClip.py b/moviepy/audio/io/AudioFileClip.py index dd241f84a..f23bd5706 100644 --- a/moviepy/audio/io/AudioFileClip.py +++ b/moviepy/audio/io/AudioFileClip.py @@ -73,7 +73,7 @@ def __init__( self.buffersize = self.reader.buffersize self.filename = filename - self.make_frame = lambda t: self.reader.get_frame(t) + self.frame_function = lambda t: self.reader.get_frame(t) self.nchannels = self.reader.nchannels def close(self): diff --git a/moviepy/decorators.py b/moviepy/decorators.py index ff0a17e0e..9c7b9a211 100644 --- a/moviepy/decorators.py +++ b/moviepy/decorators.py @@ -110,7 +110,7 @@ def convert_path_to_string(varnames): def add_mask_if_none(func, clip, *args, **kwargs): """Add a mask to the clip if there is none.""" if clip.mask is None: - clip = clip.with_add_mask() + clip = clip.with_mask() return func(clip, *args, **kwargs) diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 9c7755556..069b6a252 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -8,7 +8,7 @@ import os import threading from numbers import Real -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING, List, Union import numpy as np import proglog @@ -77,7 +77,7 @@ class VideoClip(Clip): is_mask Boolean set to `True` if the clip is a mask. - make_frame + frame_function A function ``t-> frame at time t`` where ``frame`` is a w*h*3 RGB array. 
@@ -104,16 +104,16 @@ class VideoClip(Clip): """ def __init__( - self, make_frame=None, is_mask=False, duration=None, has_constant_size=True + self, frame_function=None, is_mask=False, duration=None, has_constant_size=True ): super().__init__() self.mask = None self.audio = None self.pos = lambda t: (0, 0) self.relative_pos = False - self.layer = 0 - if make_frame: - self.make_frame = make_frame + self.layer_index = 0 + if frame_function: + self.frame_function = frame_function self.size = self.get_frame(0).shape[:2][::-1] self.is_mask = is_mask self.has_constant_size = has_constant_size @@ -151,7 +151,7 @@ def __copy__(self): This method is intensively used to produce new clips every time there is an outplace transformation of the clip (clip.resize, - clip.with_subclip, etc.) + clip.subclipped, etc.) Acts like a deepcopy except for the fact that readers and other possible unpickleables objects are not copied. @@ -327,7 +327,7 @@ def write_videofile( -------- >>> from moviepy import VideoFileClip - >>> clip = VideoFileClip("myvideo.mp4").with_subclip(100,120) + >>> clip = VideoFileClip("myvideo.mp4").subclipped(100,120) >>> clip.write_videofile("my_new_video.mp4") >>> clip.close() @@ -629,7 +629,7 @@ def preview( # ----------------------------------------------------------------- # F I L T E R I N G - def with_sub_effects( + def with_effects_on_subclip( self, effects: List["Effect"], start_time=0, end_time=None, **kwargs ): """Apply a transformation to a part of the clip. 
@@ -646,9 +646,9 @@ def with_sub_effects( >>> new_clip = clip.with_sub_effect(MultiplySpeed(0.5), 3, 6) """ - left = None if (start_time == 0) else self.with_subclip(0, start_time) - center = self.with_subclip(start_time, end_time).with_effects(effects, **kwargs) - right = None if (end_time is None) else self.with_subclip(start_time=end_time) + left = None if (start_time == 0) else self.subclipped(0, start_time) + center = self.subclipped(start_time, end_time).with_effects(effects, **kwargs) + right = None if (end_time is None) else self.subclipped(start_time=end_time) clips = [clip for clip in [left, center, right] if clip is not None] @@ -771,28 +771,7 @@ def blit_on(self, picture, t): pos = map(int, pos) return blit(im_img, picture, pos, mask=im_mask) - def with_add_mask(self): - """Add a mask VideoClip to the VideoClip. - - Returns a copy of the clip with a completely opaque mask - (made of ones). This makes computations slower compared to - having a None mask but can be useful in many cases. Choose - - Set ``constant_size`` to `False` for clips with moving - image size. - """ - if self.has_constant_size: - mask = ColorClip(self.size, 1.0, is_mask=True) - return self.with_mask(mask.with_duration(self.duration)) - else: - - def make_frame(t): - return np.ones(self.get_frame(t).shape[:2], dtype=float) - - mask = VideoClip(is_mask=True, make_frame=make_frame) - return self.with_mask(mask.with_duration(self.duration)) - - def with_on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None): + def with_background_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None): """Place the clip on a colored background. Returns a clip made of the current clip overlaid on a color @@ -846,13 +825,13 @@ def with_on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None): return result @outplace - def with_make_frame(self, mf): + def with_updated_frame_function(self, mf): """Change the clip's ``get_frame``. 
- Returns a copy of the VideoClip instance, with the make_frame + Returns a copy of the VideoClip instance, with the frame_function attribute set to `mf`. """ - self.make_frame = mf + self.frame_function = mf self.size = self.get_frame(0).shape[:2][::-1] @outplace @@ -865,14 +844,27 @@ def with_audio(self, audioclip): self.audio = audioclip @outplace - def with_mask(self, mask): + def with_mask(self, mask: Union["VideoClip", str] = "auto"): """Set the clip's mask. Returns a copy of the VideoClip with the mask attribute set to ``mask``, which must be a greyscale (values in 0-1) VideoClip. """ - assert mask is None or mask.is_mask + if mask == "auto": + if self.has_constant_size: + mask = ColorClip(self.size, 1.0, is_mask=True) + return self.with_mask(mask.with_duration(self.duration)) + else: + def frame_function(t): + return np.ones(self.get_frame(t).shape[:2], dtype=float) + + mask = VideoClip(is_mask=True, frame_function=frame_function) self.mask = mask + + @outplace + def without_mask(self): + """Remove the clip's mask.""" + self.mask = None @add_mask_if_none @outplace @@ -918,13 +910,13 @@ def with_position(self, pos, relative=False): @apply_to_mask @outplace - def with_layer(self, layer): + def with_layer_index(self, index): """Set the clip's layer in compositions. Clips with a greater ``layer`` attribute will be displayed on top of others. Note: Only has effect when the clip is used in a CompositeVideoClip. """ - self.layer = layer + self.layer_index = index def resized(self, new_size=None, height=None, width=None, apply_to_mask=True): """Returns a video clip that is a resized version of the clip. 
@@ -1122,12 +1114,12 @@ def __init__(self, data, data_to_frame, fps, is_mask=False, has_constant_size=Tr self.data_to_frame = data_to_frame self.fps = fps - def make_frame(t): + def frame_function(t): return self.data_to_frame(self.data[int(self.fps * t)]) VideoClip.__init__( self, - make_frame, + frame_function, is_mask=is_mask, duration=1.0 * len(data) / fps, has_constant_size=has_constant_size, @@ -1136,14 +1128,14 @@ def make_frame(t): class UpdatedVideoClip(VideoClip): """ - Class of clips whose make_frame requires some objects to + Class of clips whose frame_function requires some objects to be updated. Particularly practical in science where some algorithm needs to make some steps before a new frame can be generated. - UpdatedVideoClips have the following make_frame: + UpdatedVideoClips have the following frame_function: - >>> def make_frame(t): + >>> def frame_function(t): >>> while self.world.clip_t < t: >>> world.update() # updates, and increases world.clip_t >>> return world.to_frame() @@ -1169,13 +1161,13 @@ class UpdatedVideoClip(VideoClip): def __init__(self, world, is_mask=False, duration=None): self.world = world - def make_frame(t): + def frame_function(t): while self.world.clip_t < t: world.update() return world.to_frame() VideoClip.__init__( - self, make_frame=make_frame, is_mask=is_mask, duration=duration + self, frame_function=frame_function, is_mask=is_mask, duration=duration ) @@ -1246,7 +1238,7 @@ def __init__( # if the image was just a 2D mask, it should arrive here # unchanged - self.make_frame = lambda t: img + self.frame_function = lambda t: img self.size = img.shape[:2][::-1] self.img = img @@ -1278,7 +1270,7 @@ def image_transform(self, image_func, apply_to=None): apply_to = [] arr = image_func(self.get_frame(0)) self.size = arr.shape[:2][::-1] - self.make_frame = lambda t: arr + self.frame_function = lambda t: arr self.img = arr for attr in apply_to: @@ -1873,7 +1865,7 @@ def __init__( VideoClip.__init__( self, - make_frame=lambda t: 
frame_array[int(t * fps)], + frame_function=lambda t: frame_array[int(t * fps)], is_mask=is_mask, duration=duration, ) diff --git a/moviepy/video/compositing/CompositeVideoClip.py b/moviepy/video/compositing/CompositeVideoClip.py index 99752d312..df905c292 100644 --- a/moviepy/video/compositing/CompositeVideoClip.py +++ b/moviepy/video/compositing/CompositeVideoClip.py @@ -87,7 +87,7 @@ def __init__( self.created_bg = True # order self.clips by layer - self.clips = sorted(self.clips, key=lambda clip: clip.layer) + self.clips = sorted(self.clips, key=lambda clip: clip.layer_index) # compute duration ends = [clip.end for clip in self.clips] @@ -104,11 +104,11 @@ def __init__( # compute mask if necessary if transparent: maskclips = [ - (clip.mask if (clip.mask is not None) else clip.with_add_mask().mask) + (clip.mask if (clip.mask is not None) else clip.with_mask().mask) .with_position(clip.pos) .with_end(clip.end) .with_start(clip.start, change_end=False) - .with_layer(clip.layer) + .with_layer_index(clip.layer_index) for clip in self.clips ] @@ -116,7 +116,7 @@ def __init__( maskclips, self.size, is_mask=True, bg_color=0.0 ) - def make_frame(self, t): + def frame_function(self, t): """The clips playing at time `t` are blitted over one another.""" frame = self.bg.get_frame(t).astype("uint8") im = Image.fromarray(frame) @@ -284,7 +284,7 @@ def concatenate_videoclips( if method == "chain": - def make_frame(t): + def frame_function(t): i = max([i for i, e in enumerate(timings) if e <= t]) return clips[i].get_frame(t - timings[i]) @@ -294,7 +294,7 @@ def get_mask(clip): mask.duration = clip.duration return mask - result = VideoClip(is_mask=is_mask, make_frame=make_frame) + result = VideoClip(is_mask=is_mask, frame_function=frame_function) if any([clip.mask is not None for clip in clips]): masks = [get_mask(clip) for clip in clips] result.mask = concatenate_videoclips(masks, method="chain", is_mask=True) diff --git a/moviepy/video/fx/Blink.py b/moviepy/video/fx/Blink.py 
index 2274c02d1..dd37f7f6d 100644 --- a/moviepy/video/fx/Blink.py +++ b/moviepy/video/fx/Blink.py @@ -17,7 +17,7 @@ class Blink(Effect): def apply(self, clip): """Apply the effect to the clip.""" if clip.mask is None: - clip = clip.with_add_mask() + clip = clip.with_mask() duration = self.duration_on + self.duration_off clip.mask = clip.mask.transform( diff --git a/moviepy/video/fx/CrossFadeIn.py b/moviepy/video/fx/CrossFadeIn.py index 09d179c68..759646ec2 100644 --- a/moviepy/video/fx/CrossFadeIn.py +++ b/moviepy/video/fx/CrossFadeIn.py @@ -19,7 +19,7 @@ def apply(self, clip: Clip) -> Clip: raise ValueError("Attribute 'duration' not set") if clip.mask is None: - clip = clip.with_add_mask() + clip = clip.with_mask() clip.mask.duration = clip.duration clip.mask = clip.mask.with_effects([FadeIn(self.duration)]) diff --git a/moviepy/video/fx/CrossFadeOut.py b/moviepy/video/fx/CrossFadeOut.py index 5076240ad..d86671db0 100644 --- a/moviepy/video/fx/CrossFadeOut.py +++ b/moviepy/video/fx/CrossFadeOut.py @@ -19,7 +19,7 @@ def apply(self, clip: Clip) -> Clip: raise ValueError("Attribute 'duration' not set") if clip.mask is None: - clip = clip.with_add_mask() + clip = clip.with_mask() clip.mask.duration = clip.duration clip.mask = clip.mask.with_effects([FadeOut(self.duration)]) diff --git a/moviepy/video/fx/MakeLoopable.py b/moviepy/video/fx/MakeLoopable.py index 5632b7c1a..3680bf25f 100644 --- a/moviepy/video/fx/MakeLoopable.py +++ b/moviepy/video/fx/MakeLoopable.py @@ -25,6 +25,6 @@ def apply(self, clip: Clip) -> Clip: clip2 = clip.with_effects([CrossFadeIn(self.overlap_duration)]).with_start( clip.duration - self.overlap_duration ) - return CompositeVideoClip([clip, clip2]).with_subclip( + return CompositeVideoClip([clip, clip2]).subclipped( self.overlap_duration, clip.duration ) diff --git a/moviepy/video/fx/Margin.py b/moviepy/video/fx/Margin.py index 77c5389c2..f696faa4b 100644 --- a/moviepy/video/fx/Margin.py +++ b/moviepy/video/fx/Margin.py @@ -48,7 +48,7 @@ class 
Margin(Effect): def add_margin(self, clip: Clip): """Add margins to the clip.""" if (self.opacity != 1.0) and (clip.mask is None) and not (clip.is_mask): - clip = clip.with_add_mask() + clip = clip.with_mask() if self.margin_size is not None: self.left = self.right = self.top = self.bottom = self.margin_size diff --git a/moviepy/video/fx/Rotate.py b/moviepy/video/fx/Rotate.py index b19c5e4e4..4fb9a5935 100644 --- a/moviepy/video/fx/Rotate.py +++ b/moviepy/video/fx/Rotate.py @@ -16,7 +16,7 @@ class Rotate(Effect): and ``bg_color`` are not ``None``, there will be black borders. You can make them transparent with: - >>> new_clip = clip.with_add_mask().rotate(72) + >>> new_clip = clip.with_mask().rotate(72) Parameters ---------- diff --git a/moviepy/video/io/ImageSequenceClip.py b/moviepy/video/io/ImageSequenceClip.py index 01ff55355..96b407f0c 100644 --- a/moviepy/video/io/ImageSequenceClip.py +++ b/moviepy/video/io/ImageSequenceClip.py @@ -121,7 +121,7 @@ def find_image_index(t): self.last_index = None self.last_image = None - def make_frame(t): + def frame_function(t): index = find_image_index(t) if index != self.last_index: @@ -135,7 +135,7 @@ def make_frame(t): self.mask.last_index = None self.mask.last_image = None - def mask_make_frame(t): + def mask_frame_function(t): index = find_image_index(t) if index != self.mask.last_index: frame = imread(self.sequence[index])[:, :, 3] @@ -144,24 +144,24 @@ def mask_make_frame(t): return self.mask.last_image - self.mask.make_frame = mask_make_frame - self.mask.size = mask_make_frame(0).shape[:2][::-1] + self.mask.frame_function = mask_frame_function + self.mask.size = mask_frame_function(0).shape[:2][::-1] else: - def make_frame(t): + def frame_function(t): index = find_image_index(t) return self.sequence[index][:, :, :3] if with_mask and (self.sequence[0].shape[2] == 4): self.mask = VideoClip(is_mask=True) - def mask_make_frame(t): + def mask_frame_function(t): index = find_image_index(t) return 1.0 * 
self.sequence[index][:, :, 3] / 255 - self.mask.make_frame = mask_make_frame - self.mask.size = mask_make_frame(0).shape[:2][::-1] + self.mask.frame_function = mask_frame_function + self.mask.size = mask_frame_function(0).shape[:2][::-1] - self.make_frame = make_frame - self.size = make_frame(0).shape[:2][::-1] + self.frame_function = frame_function + self.size = frame_function(0).shape[:2][::-1] diff --git a/moviepy/video/io/VideoFileClip.py b/moviepy/video/io/VideoFileClip.py index 1f41f4eee..e8486b38b 100644 --- a/moviepy/video/io/VideoFileClip.py +++ b/moviepy/video/io/VideoFileClip.py @@ -125,18 +125,18 @@ def __init__( self.filename = filename if has_mask: - self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3] + self.frame_function = lambda t: self.reader.get_frame(t)[:, :, :3] - def mask_make_frame(t): + def mask_frame_function(t): return self.reader.get_frame(t)[:, :, 3] / 255.0 self.mask = VideoClip( - is_mask=True, make_frame=mask_make_frame + is_mask=True, frame_function=mask_frame_function ).with_duration(self.duration) self.mask.fps = self.fps else: - self.make_frame = lambda t: self.reader.get_frame(t) + self.frame_function = lambda t: self.reader.get_frame(t) # Make a reader for the audio, if any. if audio and self.reader.infos["audio_found"]: diff --git a/moviepy/video/tools/cuts.py b/moviepy/video/tools/cuts.py index 2baaa6dad..6c7f30bb0 100644 --- a/moviepy/video/tools/cuts.py +++ b/moviepy/video/tools/cuts.py @@ -31,7 +31,7 @@ def find_video_period(clip, fps=None, start_time=0.3): >>> from moviepy import * >>> from moviepy.video.tools.cuts import find_video_period >>> - >>> clip = VideoFileClip("media/chaplin.mp4").with_subclip(0, 1).loop(2) + >>> clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).loop(2) >>> round(videotools.find_video_period(clip, fps=80), 6) 1 """ @@ -237,7 +237,7 @@ def from_clip(clip, distance_threshold, max_duration, fps=None, logger="bar"): ... 
clip, distance_threshold=10, max_duration=3, # will take time ... ) >>> best = matches.filter(lambda m: m.time_span > 1.5).best() - >>> clip.with_subclip(best.start_time, best.end_time).write_gif("foo.gif") + >>> clip.subclipped(best.start_time, best.end_time).write_gif("foo.gif") """ N_pixels = clip.w * clip.h * 3 @@ -342,7 +342,7 @@ def select_scenes( >>> from moviepy import * >>> from moviepy.video.tools.cuts import FramesMatches >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").with_subclip(1, 4) + >>> ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) >>> mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip] >>> clip = concatenate_videoclips(mirror_and_clip) >>> @@ -430,7 +430,7 @@ def write_gifs(self, clip, gifs_dir, **kwargs): >>> from moviepy import * >>> from moviepy.video.tools.cuts import FramesMatches >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").with_subclip(1, 4) + >>> ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) >>> clip = concatenate_videoclips([ch_clip.time_mirror(), ch_clip]) >>> >>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes( @@ -446,7 +446,7 @@ def write_gifs(self, clip, gifs_dir, **kwargs): """ for start, end, _, _ in self: name = "%s/%08d_%08d.gif" % (gifs_dir, 100 * start, 100 * end) - clip.with_subclip(start, end).write_gif(name, **kwargs) + clip.subclipped(start, end).write_gif(name, **kwargs) @use_clip_fps_by_default diff --git a/moviepy/video/tools/subtitles.py b/moviepy/video/tools/subtitles.py index fa1309dd0..856ddb12a 100644 --- a/moviepy/video/tools/subtitles.py +++ b/moviepy/video/tools/subtitles.py @@ -109,7 +109,7 @@ def add_textclip_if_none(t): return sub - def make_frame(t): + def frame_function(t): sub = add_textclip_if_none(t) return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]]) @@ -117,7 +117,7 @@ def make_mask_frame(t): sub = add_textclip_if_none(t) return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]]) - 
self.make_frame = make_frame + self.frame_function = frame_function hasmask = bool(self.make_textclip("T").mask) self.mask = VideoClip(make_mask_frame, is_mask=True) if hasmask else None diff --git a/tests/conftest.py b/tests/conftest.py index f1752d1f1..e673a8f4c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -30,7 +30,7 @@ @functools.lru_cache(maxsize=None) def get_video(start_time=0, end_time=1): - return VideoFileClip("media/big_buck_bunny_432_433.webm").with_subclip( + return VideoFileClip("media/big_buck_bunny_432_433.webm").subclipped( start_time, end_time ) diff --git a/tests/test_AudioClips.py b/tests/test_AudioClips.py index ca62c1b4b..b818d4b93 100644 --- a/tests/test_AudioClips.py +++ b/tests/test_AudioClips.py @@ -162,7 +162,7 @@ def test_concatenate_audioclip_with_audiofileclip(util, stereo_wave): def test_concatenate_audiofileclips(util): - clip1 = AudioFileClip("media/crunching.mp3").with_subclip(1, 4) + clip1 = AudioFileClip("media/crunching.mp3").subclipped(1, 4) # Checks it works with videos as well clip2 = AudioFileClip("media/big_buck_bunny_432_433.webm") @@ -186,7 +186,7 @@ def test_audioclip_mono_max_volume(mono_wave): @pytest.mark.parametrize(("nchannels"), (2, 4, 8, 16)) @pytest.mark.parametrize(("channel_muted"), ("left", "right")) def test_audioclip_stereo_max_volume(nchannels, channel_muted): - def make_frame(t): + def frame_function(t): frame = [] # build channels (one of each pair muted) for i in range(int(nchannels / 2)): @@ -200,7 +200,7 @@ def make_frame(t): frame.append(np.sin(t * 0)) return np.array(frame).T - clip = AudioClip(make_frame, fps=44100, duration=1) + clip = AudioClip(frame_function, fps=44100, duration=1) max_volume = clip.max_volume(stereo=True) # if `stereo == True`, `AudioClip.max_volume` returns a Numpy array` assert isinstance(max_volume, np.ndarray) diff --git a/tests/test_Clip.py b/tests/test_Clip.py index 979cabc9a..397f228bb 100644 --- a/tests/test_Clip.py +++ b/tests/test_Clip.py @@ -194,9 +194,9 
@@ def test_clip_subclip(duration, start_time, end_time, expected_duration): if hasattr(expected_duration, "__traceback__"): with pytest.raises(expected_duration): - clip.with_subclip(start_time=start_time, end_time=end_time) + clip.subclipped(start_time=start_time, end_time=end_time) else: - sub_clip = clip.with_subclip(start_time=start_time, end_time=end_time) + sub_clip = clip.subclipped(start_time=start_time, end_time=end_time) assert sub_clip.duration == expected_duration @@ -232,7 +232,7 @@ def test_clip_subclip(duration, start_time, end_time, expected_duration): ) def test_clip_cutout(start_time, end_time, expected_frames): clip = BitmapClip([["RR", "RR"], ["GG", "GG"], ["BB", "BB"]], fps=1) - new_clip = clip.with_cutout(start_time, end_time) + new_clip = clip.with_section_cut_out(start_time, end_time) assert new_clip == BitmapClip(expected_frames, fps=1) diff --git a/tests/test_SubtitlesClip.py b/tests/test_SubtitlesClip.py index e124e0fa1..9a8a2124a 100644 --- a/tests/test_SubtitlesClip.py +++ b/tests/test_SubtitlesClip.py @@ -44,7 +44,7 @@ def test_subtitles(util): subtitles = SubtitlesClip("media/subtitles.srt", make_textclip=generator) final = CompositeVideoClip([myvideo, subtitles]) - final.with_subclip(0, 0.5).write_videofile( + final.subclipped(0, 0.5).write_videofile( os.path.join(util.TMP_DIR, "subtitles.mp4"), fps=5, logger=None, diff --git a/tests/test_VideoClip.py b/tests/test_VideoClip.py index d4189f7f1..e7c8528ab 100644 --- a/tests/test_VideoClip.py +++ b/tests/test_VideoClip.py @@ -95,7 +95,7 @@ def test_write_frame_errors_with_redirected_logs(util, video): def test_write_videofiles_with_temp_audiofile_path(util): - clip = VideoFileClip("media/big_buck_bunny_432_433.webm").with_subclip(0.2, 0.5) + clip = VideoFileClip("media/big_buck_bunny_432_433.webm").subclipped(0.2, 0.5) location = os.path.join(util.TMP_DIR, "temp_audiofile_path.webm") temp_location = os.path.join(util.TMP_DIR, "temp_audiofile") if not os.path.exists(temp_location): @@ 
-183,9 +183,9 @@ def test_write_gif(util, video): def test_with_sub_effetcs(util): - clip = VideoFileClip("media/big_buck_bunny_0_30.webm").with_subclip(0, 1) - new_clip = clip.with_sub_effects([vfx.MultiplySpeed(0.5)]) - location = os.path.join(util.TMP_DIR, "with_sub_effects.mp4") + clip = VideoFileClip("media/big_buck_bunny_0_30.webm").subclipped(0, 1) + new_clip = clip.with_effects_on_subclip([vfx.MultiplySpeed(0.5)]) + location = os.path.join(util.TMP_DIR, "with_effects_on_subclip.mp4") new_clip.write_videofile(location) assert os.path.isfile(location) @@ -193,7 +193,7 @@ def test_with_sub_effetcs(util): def test_oncolor(util): # It doesn't need to be a ColorClip clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5) - on_color_clip = clip.with_on_color(size=(200, 160), color=(0, 0, 255)) + on_color_clip = clip.with_background_color(size=(200, 160), color=(0, 0, 255)) location = os.path.join(util.TMP_DIR, "oncolor.mp4") on_color_clip.write_videofile(location, fps=24) assert os.path.isfile(location) @@ -215,8 +215,8 @@ def test_oncolor(util): def test_setaudio(util): clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5) - make_frame_440 = lambda t: [np.sin(440 * 2 * np.pi * t)] - audio = AudioClip(make_frame_440, duration=0.5) + frame_function_440 = lambda t: [np.sin(440 * 2 * np.pi * t)] + audio = AudioClip(frame_function_440, duration=0.5) audio.fps = 44100 clip = clip.with_audio(audio) location = os.path.join(util.TMP_DIR, "setaudio.mp4") @@ -226,7 +226,7 @@ def test_setaudio(util): def test_setaudio_with_audiofile(util): clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5) - audio = AudioFileClip("media/crunching.mp3").with_subclip(0, 0.5) + audio = AudioFileClip("media/crunching.mp3").subclipped(0, 0.5) clip = clip.with_audio(audio) location = os.path.join(util.TMP_DIR, "setaudiofile.mp4") clip.write_videofile(location, fps=24) @@ -236,26 +236,26 @@ def test_setaudio_with_audiofile(util): def test_setopacity(util, 
video): clip = video(start_time=0.2, end_time=0.6) clip = clip.with_opacity(0.5) - clip = clip.with_on_color(size=(1000, 1000), color=(0, 0, 255), col_opacity=0.8) + clip = clip.with_background_color(size=(1000, 1000), color=(0, 0, 255), col_opacity=0.8) location = os.path.join(util.TMP_DIR, "setopacity.mp4") clip.write_videofile(location) assert os.path.isfile(location) -def test_with_layer(): - bottom_clip = BitmapClip([["ABC"], ["BCA"], ["CAB"]], fps=1).with_layer(1) - top_clip = BitmapClip([["DEF"], ["EFD"]], fps=1).with_layer(2) +def test_with_layer_index(): + bottom_clip = BitmapClip([["ABC"], ["BCA"], ["CAB"]], fps=1).with_layer_index(1) + top_clip = BitmapClip([["DEF"], ["EFD"]], fps=1).with_layer_index(2) composite_clip = CompositeVideoClip([bottom_clip, top_clip]) reversed_composite_clip = CompositeVideoClip([top_clip, bottom_clip]) # Make sure that the order of clips makes no difference to the composite clip - assert composite_clip.with_subclip(0, 2) == reversed_composite_clip.with_subclip( + assert composite_clip.subclipped(0, 2) == reversed_composite_clip.subclipped( 0, 2 ) # Make sure that only the 'top' clip is kept - assert top_clip.with_subclip(0, 2) == composite_clip.with_subclip(0, 2) + assert top_clip.subclipped(0, 2) == composite_clip.subclipped(0, 2) # Make sure that it works even when there is only one clip playing at that time target_clip = BitmapClip([["DEF"], ["EFD"], ["CAB"]], fps=1) diff --git a/tests/test_VideoFileClip.py b/tests/test_VideoFileClip.py index f53374310..db06d460b 100644 --- a/tests/test_VideoFileClip.py +++ b/tests/test_VideoFileClip.py @@ -52,7 +52,7 @@ def test_copied_videofileclip_write_videofile(util): input_video_filepath = "media/big_buck_bunny_432_433.webm" output_video_filepath = os.path.join(util.TMP_DIR, "copied_videofileclip.mp4") - clip = VideoFileClip(input_video_filepath).with_subclip(0, 1) + clip = VideoFileClip(input_video_filepath).subclipped(0, 1) copied_clip = clip.copy() 
copied_clip.write_videofile(output_video_filepath) diff --git a/tests/test_compositing.py b/tests/test_compositing.py index 7835e2f43..43018d747 100644 --- a/tests/test_compositing.py +++ b/tests/test_compositing.py @@ -16,7 +16,7 @@ def __init__(self, clip): self.clip = clip def expect_color_at(self, ts, expected, xy=[0, 0]): - frame = self.clip.make_frame(ts) + frame = self.clip.frame_function(ts) r, g, b = expected actual = frame[xy[1]][xy[0]] diff = abs(actual[0] - r) + abs(actual[1] - g) + abs(actual[2] - b) diff --git a/tests/test_ffmpeg_reader.py b/tests/test_ffmpeg_reader.py index 6e1320490..d619efcc4 100644 --- a/tests/test_ffmpeg_reader.py +++ b/tests/test_ffmpeg_reader.py @@ -292,7 +292,7 @@ def test_ffmpeg_parse_video_rotation(): def test_correct_video_rotation(util): """See https://github.com/Zulko/moviepy/pull/577""" - clip = VideoFileClip("media/rotated-90-degrees.mp4").with_subclip(0.2, 0.4) + clip = VideoFileClip("media/rotated-90-degrees.mp4").subclipped(0.2, 0.4) corrected_rotation_filename = os.path.join( util.TMP_DIR, diff --git a/tests/test_fx.py b/tests/test_fx.py index b90aa6297..746b84913 100644 --- a/tests/test_fx.py +++ b/tests/test_fx.py @@ -1082,8 +1082,8 @@ def test_audio_normalize(): def test_audio_normalize_muted(): z_array = np.array([0.0]) - make_frame = lambda t: z_array - clip = AudioClip(make_frame, duration=1, fps=44100) + frame_function = lambda t: z_array + clip = AudioClip(frame_function, duration=1, fps=44100) clip = clip.with_effects([afx.AudioNormalize()]) assert np.array_equal(clip.to_soundarray(), z_array) @@ -1173,17 +1173,17 @@ def test_multiply_volume_audioclip( end_time, ): if sound_type == "stereo": - make_frame = lambda t: np.array( + frame_function = lambda t: np.array( [ np.sin(440 * 2 * np.pi * t), np.sin(160 * 2 * np.pi * t), ] ).T.copy(order="C") else: - make_frame = lambda t: [np.sin(440 * 2 * np.pi * t)] + frame_function = lambda t: [np.sin(440 * 2 * np.pi * t)] clip = AudioClip( - make_frame, + 
frame_function, duration=duration if duration else 0.1, fps=22050, ) @@ -1282,7 +1282,7 @@ def test_multiply_volume_videoclip(): clip = ( VideoFileClip("media/chaplin.mp4") - .with_subclip(0, 0.3) + .subclipped(0, 0.3) .with_effects( [ afx.MultiplyVolume( @@ -1374,7 +1374,7 @@ def test_audio_delay(stereo_wave, duration, offset, n_repeats, decay): # stereo audio clip clip = AudioClip( - make_frame=stereo_wave(left_freq=440, right_freq=880), + frame_function=stereo_wave(left_freq=440, right_freq=880), duration=duration, fps=44100, ) @@ -1448,11 +1448,11 @@ def test_audio_fadein( mono_wave, stereo_wave, sound_type, fps, clip_duration, fadein_duration ): if sound_type == "stereo": - make_frame = stereo_wave(left_freq=440, right_freq=160) + frame_function = stereo_wave(left_freq=440, right_freq=160) else: - make_frame = mono_wave(440) + frame_function = mono_wave(440) - clip = AudioClip(make_frame, duration=clip_duration, fps=fps) + clip = AudioClip(frame_function, duration=clip_duration, fps=fps) new_clip = clip.with_effects([afx.AudioFadeIn(fadein_duration)]) # first frame is muted @@ -1474,7 +1474,7 @@ def test_audio_fadein( start_times = np.arange(0, fadein_duration, time_foreach_part) for i, start_time in enumerate(start_times): end_time = start_time + time_foreach_part - subclip_max_volume = new_clip.with_subclip(start_time, end_time).max_volume() + subclip_max_volume = new_clip.subclipped(start_time, end_time).max_volume() possible_value = (i + 1) / n_parts assert round(subclip_max_volume, 2) in [ @@ -1488,7 +1488,7 @@ def test_audio_fadein( start_times = np.arange(fadein_duration, clip_duration, time_foreach_part) for i, start_time in enumerate(start_times): end_time = start_time + time_foreach_part - subclip_max_volume = new_clip.with_subclip(start_time, end_time).max_volume() + subclip_max_volume = new_clip.subclipped(start_time, end_time).max_volume() assert round(subclip_max_volume, 4) == 1 @@ -1509,11 +1509,11 @@ def test_audio_fadeout( mono_wave, 
stereo_wave, sound_type, fps, clip_duration, fadeout_duration ): if sound_type == "stereo": - make_frame = stereo_wave(left_freq=440, right_freq=160) + frame_function = stereo_wave(left_freq=440, right_freq=160) else: - make_frame = mono_wave(440) + frame_function = mono_wave(440) - clip = AudioClip(make_frame, duration=clip_duration, fps=fps) + clip = AudioClip(frame_function, duration=clip_duration, fps=fps) new_clip = clip.with_effects([afx.AudioFadeOut(fadeout_duration)]) fadeout_duration = convert_to_seconds(fadeout_duration) @@ -1530,7 +1530,7 @@ def test_audio_fadeout( ) for i, start_time in enumerate(start_times): end_time = start_time + time_foreach_part - subclip_max_volume = new_clip.with_subclip(start_time, end_time).max_volume() + subclip_max_volume = new_clip.subclipped(start_time, end_time).max_volume() possible_value = 1 - i * 0.1 assert round(subclip_max_volume, 2) in [ @@ -1544,7 +1544,7 @@ def test_audio_fadeout( start_times = np.arange(0, clip_duration - fadeout_duration, time_foreach_part) for i, start_time in enumerate(start_times): end_time = start_time + time_foreach_part - subclip_max_volume = new_clip.with_subclip(start_time, end_time).max_volume() + subclip_max_volume = new_clip.subclipped(start_time, end_time).max_volume() assert round(subclip_max_volume, 4) == 1 diff --git a/tests/test_issues.py b/tests/test_issues.py index 5665480eb..a23354b58 100644 --- a/tests/test_issues.py +++ b/tests/test_issues.py @@ -229,7 +229,7 @@ def size(t): avatar.with_mask(maskclip) # must set maskclip here.. 
concatenated = avatar * 3 - tt = VideoFileClip("media/big_buck_bunny_0_30.webm").with_subclip(0, 3) + tt = VideoFileClip("media/big_buck_bunny_0_30.webm").subclipped(0, 3) # TODO: Setting mask here does not work: # .with_mask(maskclip).resize(size)]) final = CompositeVideoClip( @@ -303,13 +303,13 @@ def test_issue_470(util): audio_clip = AudioFileClip("media/crunching.mp3") # end_time is out of bounds - subclip = audio_clip.with_subclip(start_time=6, end_time=9) + subclip = audio_clip.subclipped(start_time=6, end_time=9) with pytest.raises(IOError): subclip.write_audiofile(wav_filename, write_logfile=True) # but this one should work.. - subclip = audio_clip.with_subclip(start_time=6, end_time=8) + subclip = audio_clip.subclipped(start_time=6, end_time=8) subclip.write_audiofile(wav_filename, write_logfile=True) @@ -327,8 +327,8 @@ def test_issue_547(): def test_issue_636(): - with VideoFileClip("media/big_buck_bunny_0_30.webm").with_subclip(0, 11) as video: - with video.with_subclip(0, 1) as _: + with VideoFileClip("media/big_buck_bunny_0_30.webm").subclipped(0, 11) as video: + with video.subclipped(0, 1) as _: pass @@ -336,9 +336,9 @@ def test_issue_655(): video_file = "media/fire2.mp4" for subclip in [(0, 2), (1, 2), (2, 3)]: with VideoFileClip(video_file) as v: - with v.with_subclip(1, 2) as _: + with v.subclipped(1, 2) as _: pass - next(v.with_subclip(*subclip).iter_frames()) + next(v.subclipped(*subclip).iter_frames()) assert True diff --git a/tests/test_videotools.py b/tests/test_videotools.py index 28837916d..52c4e24ae 100644 --- a/tests/test_videotools.py +++ b/tests/test_videotools.py @@ -79,7 +79,7 @@ def test_detect_scenes(): def test_find_video_period(): clip = ( VideoFileClip("media/chaplin.mp4") - .with_subclip(0, 0.5) + .subclipped(0, 0.5) .with_effects([vfx.Loop(2)]) ) # fps=25 @@ -303,7 +303,7 @@ def test_FramesMatches_select_scenes( ): video_clip = VideoFileClip(filename) if subclip is not None: - video_clip = video_clip.with_subclip(subclip[0], 
subclip[1]) + video_clip = video_clip.subclipped(subclip[0], subclip[1]) clip = concatenate_videoclips( [video_clip.with_effects([vfx.TimeMirror()]), video_clip] ) @@ -318,7 +318,7 @@ def test_FramesMatches_select_scenes( def test_FramesMatches_write_gifs(util): - video_clip = VideoFileClip("media/chaplin.mp4").with_subclip(0, 0.2) + video_clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 0.2) clip = concatenate_videoclips( [video_clip.with_effects([vfx.TimeMirror()]), video_clip] ) @@ -1043,7 +1043,7 @@ def test_Trajectory_from_to_file(util): id="FakeClip", ), pytest.param( - VideoFileClip("media/chaplin.mp4").with_subclip(0, 1), + VideoFileClip("media/chaplin.mp4").subclipped(0, 1), None, None, None, @@ -1088,8 +1088,8 @@ def test_find_audio_period(mono_wave, stereo_wave, wave_type): wave2 = stereo_wave(left_freq=100, right_freq=200) clip = CompositeAudioClip( [ - AudioClip(make_frame=wave1, duration=0.3, fps=22050), - AudioClip(make_frame=wave2, duration=0.3, fps=22050).with_effects( + AudioClip(frame_function=wave1, duration=0.3, fps=22050), + AudioClip(frame_function=wave2, duration=0.3, fps=22050).with_effects( [afx.MultiplyVolume(0, end_time=0.1)] ), ] From 304fc50515c10d1df0e74fcd28ecc900ba8c46ac Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 13:34:58 -0500 Subject: [PATCH 02/12] fix --- docs/_static/code/user_guide/loading/masks.py | 4 ++-- moviepy/video/VideoClip.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/_static/code/user_guide/loading/masks.py b/docs/_static/code/user_guide/loading/masks.py index 1ebc002eb..233313a20 100644 --- a/docs/_static/code/user_guide/loading/masks.py +++ b/docs/_static/code/user_guide/loading/masks.py @@ -2,10 +2,10 @@ import numpy as np # Random RGB noise image of 200x100 -makeframe = lambda t: np.random.rand(100, 200) +frame_function = lambda t: np.random.rand(100, 200) # To define the VideoClip as a mask, just pass parameter is_mask as True -maskclip1 = 
VideoClip(makeframe, duration=4, is_mask=True) # A random noise mask +maskclip1 = VideoClip(frame_function, duration=4, is_mask=True) # A random noise mask maskclip2 = ImageClip("example_mask.jpg", is_mask=True) # A fixed mask as jpeg maskclip3 = VideoFileClip("example_mask.mp4", is_mask=True) # A video as a mask diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 069b6a252..dea26dc41 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -850,7 +850,7 @@ def with_mask(self, mask: Union["VideoClip", str] = "auto"): Returns a copy of the VideoClip with the mask attribute set to ``mask``, which must be a greyscale (values in 0-1) VideoClip. """ - if mask == "auto": + if mask == "auto": if self.has_constant_size: mask = ColorClip(self.size, 1.0, is_mask=True) return self.with_mask(mask.with_duration(self.duration)) @@ -858,7 +858,7 @@ def with_mask(self, mask: Union["VideoClip", str] = "auto"): def frame_function(t): return np.ones(self.get_frame(t).shape[:2], dtype=float) - mask = VideoClip(is_mask=True, frame_function=frame_function) + mask = VideoClip(is_mask=True, frame_function=frame_function) self.mask = mask @outplace From 221d0c6635c886ca5f6b77f385b01358f4c57e79 Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 13:42:24 -0500 Subject: [PATCH 03/12] test fixes --- moviepy/video/VideoClip.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index dea26dc41..89f53bd45 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -8,7 +8,7 @@ import os import threading from numbers import Real -from typing import TYPE_CHECKING, List, Union +from typing import TYPE_CHECKING, List, Union, Callable import numpy as np import proglog @@ -771,7 +771,7 @@ def blit_on(self, picture, t): pos = map(int, pos) return blit(im_img, picture, pos, mask=im_mask) - def with_background_color(self, size=None, color=(0, 0, 0), 
pos=None, col_opacity=None): + def with_background_color(self, size=None, color=(0, 0, 0), pos=None, opacity=None): """Place the clip on a colored background. Returns a clip made of the current clip overlaid on a color @@ -802,10 +802,10 @@ def with_background_color(self, size=None, color=(0, 0, 0), pos=None, col_opacit if pos is None: pos = "center" - if col_opacity is not None: + if opacity is not None: colorclip = ColorClip( size, color=color, duration=self.duration - ).with_opacity(col_opacity) + ).with_opacity(opacity) result = CompositeVideoClip([colorclip, self.with_position(pos)]) else: result = CompositeVideoClip( @@ -825,13 +825,13 @@ def with_background_color(self, size=None, color=(0, 0, 0), pos=None, col_opacit return result @outplace - def with_updated_frame_function(self, mf): + def with_updated_frame_function(self, frame_function: Callable[[float], np.ndarray]): """Change the clip's ``get_frame``. Returns a copy of the VideoClip instance, with the frame_function attribute set to `mf`. 
""" - self.frame_function = mf + self.frame_function = frame_function self.size = self.get_frame(0).shape[:2][::-1] @outplace @@ -853,7 +853,6 @@ def with_mask(self, mask: Union["VideoClip", str] = "auto"): if mask == "auto": if self.has_constant_size: mask = ColorClip(self.size, 1.0, is_mask=True) - return self.with_mask(mask.with_duration(self.duration)) else: def frame_function(t): return np.ones(self.get_frame(t).shape[:2], dtype=float) From 0774bbacca89e90122a3ded758d9218b117f231a Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 13:50:00 -0500 Subject: [PATCH 04/12] test fixes --- .github/workflows/formatting_linting.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/formatting_linting.yml b/.github/workflows/formatting_linting.yml index 4de0e259e..a8bd0c146 100644 --- a/.github/workflows/formatting_linting.yml +++ b/.github/workflows/formatting_linting.yml @@ -47,7 +47,7 @@ jobs: - name: Show Flake8 version run: flake8 --version - name: Run Flake8 - run: flake8 -v --show-source moviepy docs/conf.py examples tests + run: flake8 -v --show-source --max-line-length=92 moviepy docs/conf.py examples tests isort: name: isort import sorter From c0cf6c3ea28c2a49af1004a57f4d3c423b2d6bbd Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 14:44:35 -0500 Subject: [PATCH 05/12] test fixes --- moviepy/video/VideoClip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 89f53bd45..7fba8714d 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -859,7 +859,7 @@ def frame_function(t): mask = VideoClip(is_mask=True, frame_function=frame_function) self.mask = mask - + @outplace def without_mask(self): """Remove the clip's mask.""" From 149152ddf18f2909ba9b1662755bc79b0a65c499 Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 14:46:03 -0500 Subject: [PATCH 06/12] test fixes --- moviepy/video/VideoClip.py | 7 +++++-- 
tests/test_VideoClip.py | 6 ++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 7fba8714d..71b97d01a 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -791,7 +791,7 @@ def with_background_color(self, size=None, color=(0, 0, 0), pos=None, opacity=No pos Position of the clip in the final clip. 'center' is the default - col_opacity + opacity Parameter in 0..1 indicating the opacity of the colored background. """ @@ -825,7 +825,9 @@ def with_background_color(self, size=None, color=(0, 0, 0), pos=None, opacity=No return result @outplace - def with_updated_frame_function(self, frame_function: Callable[[float], np.ndarray]): + def with_updated_frame_function( + self, frame_function: Callable[[float], np.ndarray] + ): """Change the clip's ``get_frame``. Returns a copy of the VideoClip instance, with the frame_function @@ -854,6 +856,7 @@ def with_mask(self, mask: Union["VideoClip", str] = "auto"): if self.has_constant_size: mask = ColorClip(self.size, 1.0, is_mask=True) else: + def frame_function(t): return np.ones(self.get_frame(t).shape[:2], dtype=float) diff --git a/tests/test_VideoClip.py b/tests/test_VideoClip.py index e7c8528ab..53c04817b 100644 --- a/tests/test_VideoClip.py +++ b/tests/test_VideoClip.py @@ -236,7 +236,7 @@ def test_setaudio_with_audiofile(util): def test_setopacity(util, video): clip = video(start_time=0.2, end_time=0.6) clip = clip.with_opacity(0.5) - clip = clip.with_background_color(size=(1000, 1000), color=(0, 0, 255), col_opacity=0.8) + clip = clip.with_background_color(size=(1000, 1000), color=(0, 0, 255), opacity=0.8) location = os.path.join(util.TMP_DIR, "setopacity.mp4") clip.write_videofile(location) assert os.path.isfile(location) @@ -250,9 +250,7 @@ def test_with_layer_index(): reversed_composite_clip = CompositeVideoClip([top_clip, bottom_clip]) # Make sure that the order of clips makes no difference to the composite clip - assert 
composite_clip.subclipped(0, 2) == reversed_composite_clip.subclipped( - 0, 2 - ) + assert composite_clip.subclipped(0, 2) == reversed_composite_clip.subclipped(0, 2) # Make sure that only the 'top' clip is kept assert top_clip.subclipped(0, 2) == composite_clip.subclipped(0, 2) From aaf29856db100fda4567d45db3e589fc1d92cb17 Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 14:52:12 -0500 Subject: [PATCH 07/12] isort --- moviepy/video/VideoClip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 71b97d01a..4bc3ed476 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -8,7 +8,7 @@ import os import threading from numbers import Real -from typing import TYPE_CHECKING, List, Union, Callable +from typing import TYPE_CHECKING, Callable, List, Union import numpy as np import proglog From fdaed8e7a3f178602c5edad8bcc30bd3806ae5b4 Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 20:40:24 -0500 Subject: [PATCH 08/12] tweaking examples everywhere --- README.md | 27 +++-- .../compositing/CompositeAudioClip.py | 22 ++-- .../compositing/CompositeVideoClip.py | 14 ++- .../user_guide/compositing/concatenate.py | 8 +- .../user_guide/compositing/crossfadein.py | 23 ++-- .../user_guide/compositing/juxtaposing.py | 13 ++- .../user_guide/compositing/with_position.py | 12 +- .../code/user_guide/compositing/with_start.py | 8 +- .../code/user_guide/effects/custom_effect.py | 6 +- .../user_guide/effects/image_transform.py | 2 + .../user_guide/effects/modify_copy_example.py | 12 +- .../code/user_guide/effects/time_transform.py | 12 +- .../code/user_guide/effects/transform.py | 3 +- .../code/user_guide/effects/using_effects.py | 13 +-- .../user_guide/effects/using_with_methods.py | 1 - .../code/user_guide/loading/AudioArrayClip.py | 25 ++-- .../code/user_guide/loading/AudioClip.py | 12 +- .../code/user_guide/loading/AudioFileClip.py | 1 - 
.../code/user_guide/loading/ColorClip.py | 12 +- .../code/user_guide/loading/DataVideoClip.py | 10 +- .../code/user_guide/loading/ImageClip.py | 10 +- .../user_guide/loading/ImageSequenceClip.py | 24 ++-- .../code/user_guide/loading/TextClip.py | 13 +-- .../user_guide/loading/UpdatedVideoClip.py | 26 +++-- .../code/user_guide/loading/VideoClip.py | 12 +- .../code/user_guide/loading/VideoFileClip.py | 8 +- .../code/user_guide/loading/closing.py | 3 +- .../code/user_guide/loading/loading.py | 60 ++++++---- docs/_static/code/user_guide/loading/masks.py | 2 +- docs/getting_started/install.rst | 4 +- moviepy/Clip.py | 26 +++-- moviepy/audio/AudioClip.py | 28 ++--- moviepy/audio/fx/AudioDelay.py | 22 ++-- moviepy/audio/fx/AudioFadeIn.py | 6 +- moviepy/audio/fx/AudioFadeOut.py | 6 +- moviepy/audio/fx/AudioLoop.py | 12 +- moviepy/audio/fx/MultiplyStereoVolume.py | 14 ++- moviepy/audio/fx/MultiplyVolume.py | 22 ++-- moviepy/audio/io/AudioFileClip.py | 6 +- moviepy/tools.py | 28 ++--- moviepy/video/VideoClip.py | 74 +++++++----- moviepy/video/fx/MasksAnd.py | 10 +- moviepy/video/fx/MasksOr.py | 10 +- moviepy/video/fx/Resize.py | 10 +- moviepy/video/fx/SlideIn.py | 28 ++--- moviepy/video/fx/SlideOut.py | 28 ++--- moviepy/video/io/VideoFileClip.py | 10 +- moviepy/video/io/display_in_notebook.py | 35 +++--- moviepy/video/tools/cuts.py | 110 ++++++++++-------- moviepy/video/tools/drawing.py | 84 ++++++------- moviepy/video/tools/interpolators.py | 12 +- moviepy/video/tools/subtitles.py | 20 ++-- 52 files changed, 550 insertions(+), 449 deletions(-) diff --git a/README.md b/README.md index 2db51c8ad..ca866c7c5 100644 --- a/README.md +++ b/README.md @@ -20,26 +20,29 @@ In this example we open a video file, select the subclip between 10 and result to a new file: ``` python -# Import everything needed to edit video clips -from moviepy import * +from moviepy import VideoFileClip, TextClip, CompositeVideoClip # Load file example.mp4 and keep only the subclip from 00:00:10 to 00:00:20 
-clip = VideoFileClip("long_examples/example2.mp4").subclipped(10, 20) - # Reduce the audio volume to 80% of its original volume -clip = clip.with_volume_scaled(0.8) +clip = ( + VideoFileClip("long_examples/example2.mp4") + .subclipped(10, 20) + .with_volume_scaled(0.8) +) # Generate a text clip. You can customize the font, color, etc. -txt_clip = TextClip(font="example.ttf", text="Big Buck Bunny", font_size=70, color='white') -#The text clip should appear for 10s at the center of the screen -txt_clip = txt_clip.with_duration(10).with_position('center') -# Overlay the text clip on the first video clip -video = CompositeVideoClip([clip, txt_clip]) +txt_clip = TextClip( + font="Arial.ttf", + text="Hello there!", + font_size=70, + color='white' +).with_duration(10).with_position('center') -# Write the result to a file (many options available!) -video.write_videofile("result.mp4") +# Overlay the text clip on the first video clip +final_video = CompositeVideoClip([clip, txt_clip]) +final_video.write_videofile("result.mp4") ``` # Installation diff --git a/docs/_static/code/user_guide/compositing/CompositeAudioClip.py b/docs/_static/code/user_guide/compositing/CompositeAudioClip.py index 121dc4b24..dd8217e49 100644 --- a/docs/_static/code/user_guide/compositing/CompositeAudioClip.py +++ b/docs/_static/code/user_guide/compositing/CompositeAudioClip.py @@ -1,18 +1,22 @@ -from moviepy import * +"""Let's first concatenate (one after the other) then composite +(on top of each other) three audio clips.""" + +from moviepy import AudioFileClip, CompositeAudioClip, concatenate_audioclips # We load all the clips we want to compose -aclip1 = AudioFileClip("example.wav") -aclip2 = AudioFileClip("example2.wav") -aclip3 = AudioFileClip("example3.wav") +clip1 = AudioFileClip("example.wav") +clip2 = AudioFileClip("example2.wav") +clip3 = AudioFileClip("example3.wav") # All clip will play one after the other -concat = concatenate_audioclips([aclip1, aclip2, aclip3]) +concat = 
concatenate_audioclips([clip1, clip2, clip3]) -# We will play aclip1, then ontop of it aclip2 after 5s, and the aclip3 on top of both after 9s +# We will play clip1, then on top of it clip2 starting at t=5s, +# and clip3 on top of both starting t=9s compo = CompositeAudioClip( [ - aclip1.with_volume_scaled(1.2), - aclip2.with_start(5), # start at t=5s - aclip3.with_start(9), + clip1.with_volume_scaled(1.2), + clip2.with_start(5), # start at t=5s + clip3.with_start(9), ] ) diff --git a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py index 5da6b5181..65937e689 100644 --- a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py +++ b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py @@ -1,10 +1,14 @@ -from moviepy import * +"""Let's stack three video clips on top of each other with +CompositeVideoClip.""" + +from moviepy import VideoFileClip, CompositeVideoClip # We load all the clips we want to compose -clip1 = VideoFileClip("example.mp4") -clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) -clip3 = VideoFileClip("example3.mp4") +clip1 = VideoFileClip("some_background.mp4") +clip2 = VideoFileClip("some_video.mp4").subclipped(0, 1) +clip3 = VideoFileClip("some_moving_text.mp4") -# We concatenate them and write theme stacked on top of each other, with clip3 over clip2 over clip1 +# We concatenate them and write theme stacked on top of each other, +# with clip3 over clip2 over clip1 final_clip = CompositeVideoClip([clip1, clip2, clip3]) final_clip.write_videofile("final_clip.mp4") diff --git a/docs/_static/code/user_guide/compositing/concatenate.py b/docs/_static/code/user_guide/compositing/concatenate.py index 5bdaf0252..5025356ed 100644 --- a/docs/_static/code/user_guide/compositing/concatenate.py +++ b/docs/_static/code/user_guide/compositing/concatenate.py @@ -1,9 +1,11 @@ +"""Let's concatenate (play one after the other) three video clips.""" + from moviepy 
import VideoFileClip, concatenate_videoclips # We load all the clips we want to concatenate -clip1 = VideoFileClip("example.mp4") -clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) -clip3 = VideoFileClip("example3.mp4") +clip1 = VideoFileClip("first_scene.mp4") +clip2 = VideoFileClip("second_scene.mp4").subclipped(0, 1) +clip3 = VideoFileClip("third_scene.mp4") # We concatenate them and write the result final_clip = concatenate_videoclips([clip1, clip2, clip3]) diff --git a/docs/_static/code/user_guide/compositing/crossfadein.py b/docs/_static/code/user_guide/compositing/crossfadein.py index 3127e1832..0c36713f8 100644 --- a/docs/_static/code/user_guide/compositing/crossfadein.py +++ b/docs/_static/code/user_guide/compositing/crossfadein.py @@ -1,18 +1,15 @@ -from moviepy import * +"""In this example, we will concatenate two clips with a 1-second +crossfadein of the second clip.""" + +from moviepy import VideoFileClip, CompositeVideoClip, vfx # We load all the clips we want to compose clip1 = VideoFileClip("example.mp4") -clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) - -# Clip2 will be on top of clip1 for 1s -clip1 = clip1.with_end(2) -clip2 = clip2.with_start(1) - -# We will add a crossfadein on clip2 for 1s -# As the other effects, transitions are added to Clip methods at runtime -clip2 = clip2.with_effects([vfx.CrossFadeIn(1)]) - +clip2 = VideoFileClip("example2.mp4") -# We write the result -final_clip = CompositeVideoClip([clip1, clip2]) +clips = [ + clip1.with_end(2), + clip2.with_start(1).with_effects([vfx.CrossFadeIn(1)]), +] +final_clip = CompositeVideoClip(clips) final_clip.write_videofile("final_clip.mp4") diff --git a/docs/_static/code/user_guide/compositing/juxtaposing.py b/docs/_static/code/user_guide/compositing/juxtaposing.py index 3d9b6d9d9..3f032643e 100644 --- a/docs/_static/code/user_guide/compositing/juxtaposing.py +++ b/docs/_static/code/user_guide/compositing/juxtaposing.py @@ -1,3 +1,5 @@ +"""Let's juxtapose four video clips in 
a 2x2 grid.""" + from moviepy import VideoFileClip, clips_array, vfx # We will use the same clip and transform it in 3 ways @@ -8,9 +10,12 @@ # The form of the final clip will depend of the shape of the array # We want our clip to be our 4 videos, 2x2, so we make an array of 2x2 -final_clip = clips_array([[clip1, clip2], [clip3, clip4]]) -final_clip = final_clip.resized( - width=480 -) # We resize the resulting clip to have the dimensions we want +array = [ + [clip1, clip2], + [clip3, clip4], +] +final_clip = clips_array(array) +# let's resize the final clip so it has 480px of width +final_clip = final_clip.resized(width=480) final_clip.write_videofile("final_clip.mp4") diff --git a/docs/_static/code/user_guide/compositing/with_position.py b/docs/_static/code/user_guide/compositing/with_position.py index b020db854..9fca3ec41 100644 --- a/docs/_static/code/user_guide/compositing/with_position.py +++ b/docs/_static/code/user_guide/compositing/with_position.py @@ -1,4 +1,6 @@ -from moviepy import * +"""Let's position some text and images on a video.""" + +from moviepy import TextClip, VideoFileClip, CompositeVideoClip, ImageClip # We load all the clips we want to compose background = VideoFileClip("example2.mp4").subclipped(0, 2) @@ -28,8 +30,9 @@ ) logo = ImageClip("./example2.png", duration=1).resized(height=50) -# We want our title to be at the center horizontaly and start at 25% of the video verticaly -# We can set as "center", "left", "right", "top" and "bottom", and % relative from the clip size +# We want our title to be at the center horizontaly and start at 25% +# of the video verticaly. 
We can set as "center", "left", "right", +# "top" and "bottom", and % relative from the clip size title = title.with_position(("center", 0.25), relative=True) # We want the author to be in the center, 30px under the title @@ -42,7 +45,8 @@ copyright = copyright.with_position(("center", background.h - copyright.h - 30)) # Finally, we want the logo to be in the center, but to drop as time pass -# We can do so by setting position as a function that take time as argument, a lot like frame_function +# We can do so by setting position as a function that take time as argument, +# a lot like frame_function top = (background.h - logo.h) / 2 logo = logo.with_position(lambda t: ("center", top + t * 30)) diff --git a/docs/_static/code/user_guide/compositing/with_start.py b/docs/_static/code/user_guide/compositing/with_start.py index ae06bb59d..9d6bf84a1 100644 --- a/docs/_static/code/user_guide/compositing/with_start.py +++ b/docs/_static/code/user_guide/compositing/with_start.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import VideoFileClip, CompositeVideoClip # We load all the clips we want to compose clip1 = VideoFileClip("example.mp4") @@ -12,9 +12,9 @@ clip2 = clip2.with_start(1.5) # We want to play clip3 at the end of clip2, and so for 3 seconds only -clip3 = clip3.with_start(clip2.end).with_duration( - 1 -) # Some times its more practical to modify the duration of a clip instead of his end +# Some times its more practical to modify the duration of a clip instead +# of his end +clip3 = clip3.with_start(clip2.end).with_duration(1) # We write the result final_clip = CompositeVideoClip([clip1, clip2, clip3]) diff --git a/docs/_static/code/user_guide/effects/custom_effect.py b/docs/_static/code/user_guide/effects/custom_effect.py index 51bf04b95..bfe75a00b 100644 --- a/docs/_static/code/user_guide/effects/custom_effect.py +++ b/docs/_static/code/user_guide/effects/custom_effect.py @@ -1,3 +1,6 @@ +"""Let's write a custom effect that will add a basic progress bar 
+at the bottom of our clip.""" + from moviepy import VideoClip from moviepy.decorators import requires_duration @@ -22,7 +25,8 @@ def filter(get_frame, t): progression = t / clip.duration bar_width = int(progression * clip.w) - # Showing a progress bar is just replacing bottom pixels on some part of our frame + # Showing a progress bar is just replacing bottom pixels + # on some part of our frame frame = get_frame(t) frame[-height:, 0:bar_width] = color diff --git a/docs/_static/code/user_guide/effects/image_transform.py b/docs/_static/code/user_guide/effects/image_transform.py index 2983b8125..1d2b6cde7 100644 --- a/docs/_static/code/user_guide/effects/image_transform.py +++ b/docs/_static/code/user_guide/effects/image_transform.py @@ -1,3 +1,5 @@ +"""Let's invert the green and blue channels of a video.""" + from moviepy import VideoFileClip import numpy diff --git a/docs/_static/code/user_guide/effects/modify_copy_example.py b/docs/_static/code/user_guide/effects/modify_copy_example.py index d965b0a03..6cb001c11 100644 --- a/docs/_static/code/user_guide/effects/modify_copy_example.py +++ b/docs/_static/code/user_guide/effects/modify_copy_example.py @@ -1,19 +1,21 @@ # Import everything needed to edit video clips -from moviepy import * +from moviepy import VideoFileClip # Load example.mp4 clip = VideoFileClip("example.mp4") -# This does nothing, as multiply_volume will return a copy of clip which you will loose immediatly as you dont store it +# This does nothing, as multiply_volume will return a copy of clip +# which you will loose immediatly as you dont store it # If you was to render clip now, the audio would still be at full volume clip.with_volume_scaled(0.1) -# This create a copy of clip in clip_whisper with a volume of only 10% the original, but does not modify the original clip +# This create a copy of clip in clip_whisper with a volume of only 10% the original, +# but does not modify the original clip # If you was to render clip right now, the audio would 
still be at full volume # If you was to render clip_whisper, the audio would be a 10% of the original volume clip_whisper = clip.with_volume_scaled(0.1) -# This replace the original clip with a copy of it where volume is only 10% of the original -# If you was to render clip now, the audio would be at 10% +# This replace the original clip with a copy of it where volume is only 10% of +# the original. If you was to render clip now, the audio would be at 10% # The original clip is now lost clip = clip.with_volume_scaled(0.1) diff --git a/docs/_static/code/user_guide/effects/time_transform.py b/docs/_static/code/user_guide/effects/time_transform.py index c5ccad256..86e837111 100644 --- a/docs/_static/code/user_guide/effects/time_transform.py +++ b/docs/_static/code/user_guide/effects/time_transform.py @@ -3,13 +3,7 @@ my_clip = VideoFileClip("example.mp4") - -# You can define a function the classical way -def accel_x3(time: float) -> float: - return time * 3 - - -modified_clip1 = my_clip.time_transform(accel_x3) - -# Of you can also use lambda function +# Let's accelerate the video by a factor of 3 +modified_clip1 = my_clip.time_transform(lambda t: t * 3) +# Let's play the video back and forth with a "sine" time-warping effect modified_clip2 = my_clip.time_transform(lambda t: 1 + math.sin(t)) diff --git a/docs/_static/code/user_guide/effects/transform.py b/docs/_static/code/user_guide/effects/transform.py index 38678f865..2a9d0410c 100644 --- a/docs/_static/code/user_guide/effects/transform.py +++ b/docs/_static/code/user_guide/effects/transform.py @@ -1,5 +1,6 @@ +"""Let's create a scolling video effect from scratch.""" + from moviepy import VideoFileClip -import math my_clip = VideoFileClip("example.mp4") diff --git a/docs/_static/code/user_guide/effects/using_effects.py b/docs/_static/code/user_guide/effects/using_effects.py index 8cf6cc933..06a52006f 100644 --- a/docs/_static/code/user_guide/effects/using_effects.py +++ 
b/docs/_static/code/user_guide/effects/using_effects.py @@ -2,14 +2,13 @@ from moviepy import vfx, afx myclip = VideoFileClip("example.mp4") -myclip = myclip.with_effects( - [vfx.Resize(width=460)] -) # resize clip to be 460px in width, keeping aspect ratio +# resize clip to be 460px in width, keeping aspect ratio +myclip = myclip.with_effects([vfx.Resize(width=460)]) # fx method return a copy of the clip, so we can easily chain them -myclip = myclip.with_effects( - [vfx.MultiplySpeed(2), afx.MultiplyVolume(0.5)] -) # double the speed and half the audio volume +# double the speed and half the audio volume +myclip = myclip.with_effects([vfx.MultiplySpeed(2), afx.MultiplyVolume(0.5)]) -# because effects are added to Clip at runtime, you can also call them directly from your clip as methods +# because effects are added to Clip at runtime, you can also call +# them directly from your clip as methods myclip = myclip.with_effects([vfx.MultiplyColor(0.5)]) # darken the clip diff --git a/docs/_static/code/user_guide/effects/using_with_methods.py b/docs/_static/code/user_guide/effects/using_with_methods.py index 0267dc189..eae952147 100644 --- a/docs/_static/code/user_guide/effects/using_with_methods.py +++ b/docs/_static/code/user_guide/effects/using_with_methods.py @@ -1,5 +1,4 @@ from moviepy import VideoFileClip -from moviepy import vfx, afx myclip = VideoFileClip("example.mp4") myclip = myclip.with_end(5) # stop the clip after 5 sec diff --git a/docs/_static/code/user_guide/loading/AudioArrayClip.py b/docs/_static/code/user_guide/loading/AudioArrayClip.py index 6a02d41a0..fafc80269 100644 --- a/docs/_static/code/user_guide/loading/AudioArrayClip.py +++ b/docs/_static/code/user_guide/loading/AudioArrayClip.py @@ -1,5 +1,7 @@ +"""Let's create an audioclip from values in a numpy array.""" + import numpy as np -from moviepy import * +from moviepy import AudioArrayClip # We want to play those notes notes = {"A": 440, "B": 494, "C": 523, "D": 587, "E": 659, "F": 698} @@ 
-9,26 +11,25 @@ sample_rate = 44100 # Number of samples per second note_size = int(note_duration * sample_rate) -total_size = note_size * len(notes) +n_frames = note_size * len(notes) def frame_function(t, note_frequency): return np.sin(note_frequency * 2 * np.pi * t) -# We generate all frames timepoints -times = np.linspace(0, total_duration, total_size) +# At this point one could use this audioclip which generates the audio on the fly +# clip = AudioFileClip(frame_function) -# We make an array of size N*1, where N is the number of frames * total duration -audio_array = np.zeros((total_size, 2)) -i = 0 -for note, frequency in notes.items(): - for _ in range(note_size): - audio_array[i][0] = frame_function(times[i], frequency) - i += 1 +# We generate all frames timepoints +audio_frame_values = [ + 2 * [frame_function(t, freq)] + for freq in notes.values() + for t in np.arange(0, note_duration, 1.0 / sample_rate) +] # Create an AudioArrayClip from the audio samples -audio_clip = AudioArrayClip(audio_array, fps=sample_rate) +audio_clip = AudioArrayClip(np.array(audio_frame_values), fps=sample_rate) # Write the audio clip to a WAV file audio_clip.write_audiofile("result.wav", fps=44100) diff --git a/docs/_static/code/user_guide/loading/AudioClip.py b/docs/_static/code/user_guide/loading/AudioClip.py index fe1785c8e..34af669af 100644 --- a/docs/_static/code/user_guide/loading/AudioClip.py +++ b/docs/_static/code/user_guide/loading/AudioClip.py @@ -1,8 +1,10 @@ -from moviepy import * +from moviepy import AudioClip import numpy as np -# Producing a sinewave of 440 Hz -> note A -frame_function_audio = lambda t: np.sin(440 * 2 * np.pi * t) -# AUDIO CLIPS -clip = AudioClip(frame_function_audio, duration=3) +def audio_frame(t): + """Producing a sinewave of 440 Hz -> note A""" + return np.sin(440 * 2 * np.pi * t) + + +audio_clip = AudioClip(frame_function=audio_frame, duration=3) diff --git a/docs/_static/code/user_guide/loading/AudioFileClip.py 
b/docs/_static/code/user_guide/loading/AudioFileClip.py index e9aecd89f..07825956f 100644 --- a/docs/_static/code/user_guide/loading/AudioFileClip.py +++ b/docs/_static/code/user_guide/loading/AudioFileClip.py @@ -1,5 +1,4 @@ from moviepy import * -import numpy as np # Works for audio files, but also videos file where you only want the keep the audio track clip = AudioFileClip("example.wav") diff --git a/docs/_static/code/user_guide/loading/ColorClip.py b/docs/_static/code/user_guide/loading/ColorClip.py index 9bf5d9677..9c85d962b 100644 --- a/docs/_static/code/user_guide/loading/ColorClip.py +++ b/docs/_static/code/user_guide/loading/ColorClip.py @@ -1,8 +1,6 @@ -from moviepy import * +from moviepy import ColorClip -myclip = ColorClip( - size=(200, 100), color=(255, 0, 0), duration=1 -) # Color is passed as a RGB tuple -myclip.write_videofile( - "result.mp4", fps=1 -) # We really dont need more than 1 fps do we ? +# Color is passed as a RGB tuple +myclip = ColorClip(size=(200, 100), color=(255, 0, 0), duration=1) +# We really dont need more than 1 fps do we ? 
+myclip.write_videofile("result.mp4", fps=1) diff --git a/docs/_static/code/user_guide/loading/DataVideoClip.py b/docs/_static/code/user_guide/loading/DataVideoClip.py index 3d59498f1..096406207 100644 --- a/docs/_static/code/user_guide/loading/DataVideoClip.py +++ b/docs/_static/code/user_guide/loading/DataVideoClip.py @@ -1,4 +1,6 @@ -from moviepy import * +"""Let's make a clip where frames depend on values in a list""" + +from moviepy import DataVideoClip import numpy as np # Dataset will just be a list of colors as RGB @@ -12,13 +14,15 @@ ] -# The function make frame take data and create an image of 200x100 px fill with the color +# The function make frame take data and create an image of 200x100 px +# filled with the color given in the dataset def frame_function(data): frame = np.full((100, 200, 3), data, dtype=np.uint8) return frame -# We create the DataVideoClip, and we set FPS at 2, making a 3s clip (because len(dataset) = 6, so 6/2=3) +# We create the DataVideoClip, and we set FPS at 2, making a 3s clip +# (because len(dataset) = 6, so 6/2=3) myclip = DataVideoClip(data=dataset, data_to_frame=frame_function, fps=2) # Modifying fps here will change video FPS, not clip FPS diff --git a/docs/_static/code/user_guide/loading/ImageClip.py b/docs/_static/code/user_guide/loading/ImageClip.py index f704a5500..1c5733026 100644 --- a/docs/_static/code/user_guide/loading/ImageClip.py +++ b/docs/_static/code/user_guide/loading/ImageClip.py @@ -1,4 +1,7 @@ -from moviepy import * +"""Here's how you transform a VideoClip into an ImageClip from an image, from +arbitrary data, or by extracting a frame at a given time""" + +from moviepy import ImageClip, VideoFileClip import numpy as np # Random RGB noise image of 200x100 @@ -6,6 +9,5 @@ myclip1 = ImageClip("example.png") # You can create it from a path myclip2 = ImageClip(noise_image) # from a (height x width x 3) RGB numpy array -myclip3 = VideoFileClip("./example.mp4").to_ImageClip( - t="00:00:01" -) # Or load videoclip 
and extract frame at a given time +# Or load videoclip and extract frame at a given time +myclip3 = VideoFileClip("./example.mp4").to_ImageClip(t="00:00:01") diff --git a/docs/_static/code/user_guide/loading/ImageSequenceClip.py b/docs/_static/code/user_guide/loading/ImageSequenceClip.py index a19432987..020324002 100644 --- a/docs/_static/code/user_guide/loading/ImageSequenceClip.py +++ b/docs/_static/code/user_guide/loading/ImageSequenceClip.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import ImageSequenceClip # A clip with a list of images showed for 1 second each myclip = ImageSequenceClip( @@ -9,19 +9,19 @@ ], durations=[1, 1, 1], ) -print( - "Clip duration: {}".format(myclip.duration) -) # 3 images, 1 seconds each, duration = 3 -print("Clip fps: {}".format(myclip.fps)) # 3 seconds, 3 images, fps is 3/3 = 1 +# 3 images, 1 seconds each, duration = 3 +print("Clip duration: {}".format(myclip.duration)) +# 3 seconds, 3 images, fps is 3/3 = 1 +print("Clip fps: {}".format(myclip.fps)) -# This time we will load all images in the dir, and instead of showing theme for X seconds, we will define FPS +# This time we will load all images in the dir, and instead of showing theme +# for X seconds, we will define FPS myclip2 = ImageSequenceClip("./example_img_dir", fps=30) -print( - "Clip duration: {}".format(myclip2.duration) -) # fps = 30, so duration = nb images in dir / 30 +# fps = 30, so duration = nb images in dir / 30 +print("Clip duration: {}".format(myclip2.duration)) print("Clip fps: {}".format(myclip2.fps)) # fps = 30 +# the gif will be 30 fps, its duration will depend on the number of +# images in dir myclip.write_gif("result.gif") # the gif will be 3 sec and 1 fps -myclip2.write_gif( - "result2.gif" -) # the gif will be 30 fps, duration will vary based on number of images in dir +myclip2.write_gif("result2.gif") diff --git a/docs/_static/code/user_guide/loading/TextClip.py b/docs/_static/code/user_guide/loading/TextClip.py index c3dd23105..490775487 
100644 --- a/docs/_static/code/user_guide/loading/TextClip.py +++ b/docs/_static/code/user_guide/loading/TextClip.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import TextClip font = "./example.ttf" @@ -8,11 +8,10 @@ font=font, text="Hello World !", font_size=30, - color="#FF0000", + color="#FF0000", # Red bg_color="#FFFFFF", duration=2, -) # Red - +) # This time we load text from a file, we set a fixed size for clip and let the system find best font size, # allowing for line breaking txt_clip2 = TextClip( @@ -26,8 +25,6 @@ # we set duration, because by default image clip are infinite, and we cannot render infinite txt_clip2 = txt_clip2.with_duration(2) - -txt_clip1.write_videofile( - "result1.mp4", fps=24 -) # ImageClip have no FPS either, so we must defined it +# ImageClip have no FPS either, so we must defined it +txt_clip1.write_videofile("result1.mp4", fps=24) txt_clip2.write_videofile("result2.mp4", fps=24) diff --git a/docs/_static/code/user_guide/loading/UpdatedVideoClip.py b/docs/_static/code/user_guide/loading/UpdatedVideoClip.py index c45f7192a..c9a7a4920 100644 --- a/docs/_static/code/user_guide/loading/UpdatedVideoClip.py +++ b/docs/_static/code/user_guide/loading/UpdatedVideoClip.py @@ -1,17 +1,20 @@ -from moviepy import * +from moviepy import UpdatedVideoClip import numpy as np import random -# Imagine we want to make a video that become more and more red as we repeat same face on coinflip in a row -# because coinflip are done in real time, we need to wait until a winning row is done to be able -# to make the next frame. -# This is a world simulating that. Sorry, it's hard to come up with examples... class CoinFlipWorld: + """A simulation of coin flipping. + + Imagine we want to make a video that become more and more red as we repeat same face + on coinflip in a row because coinflip are done in real time, we need to wait + until a winning row is done to be able to make the next frame. + This is a world simulating that. 
Sorry, it's hard to come up with examples...""" + def __init__(self, fps): """ - FPS is usefull because we must increment clip_t by 1/FPS to have UpdatedVideoClip run with a certain FPS - + FPS is usefull because we must increment clip_t by 1/FPS to have + UpdatedVideoClip run with a certain FPS """ self.clip_t = 0 self.win_strike = 0 @@ -41,9 +44,9 @@ def update(self): self.clip_t += 1 / self.fps def to_frame(self): - red_intensity = 255 * ( - self.win_strike / 10 - ) # 100% red for 10 victories and more + """Return a frame of a 200x100 image with red more or less intense based + on number of victories in a row.""" + red_intensity = 255 * (self.win_strike / 10) red_intensity = min(red_intensity, 255) # A 200x100 image with red more or less intense based on number of victories in a row @@ -53,6 +56,7 @@ def to_frame(self): world = CoinFlipWorld(fps=5) myclip = UpdatedVideoClip(world=world, duration=10) -# We will set FPS to same as world, if we was to use a different FPS, the lowest from world.fps and our write_videofile fps param +# We will set FPS to same as world, if we was to use a different FPS, +# the lowest from world.fps and our write_videofile fps param # will be the real visible fps myclip.write_videofile("result.mp4", fps=5) diff --git a/docs/_static/code/user_guide/loading/VideoClip.py b/docs/_static/code/user_guide/loading/VideoClip.py index 2385eee3b..5952af359 100644 --- a/docs/_static/code/user_guide/loading/VideoClip.py +++ b/docs/_static/code/user_guide/loading/VideoClip.py @@ -1,6 +1,6 @@ from PIL import Image, ImageDraw import numpy as np -from moviepy import * +from moviepy import VideoClip import math WIDTH, HEIGHT = (128, 128) @@ -24,9 +24,7 @@ def frame_function(t): return np.array(img) # returns a 8-bit RGB array -clip = VideoClip( - frame_function, duration=2 -) # we define a 2s duration for the clip to be able to render it later -clip.write_gif( - "circle.gif", fps=15 -) # we must set a framerate because VideoClip have no framerate by 
default +# we define a 2s duration for the clip to be able to render it later +clip = VideoClip(frame_function, duration=2) +# we must set a framerate because VideoClip have no framerate by default +clip.write_gif("circle.gif", fps=15) diff --git a/docs/_static/code/user_guide/loading/VideoFileClip.py b/docs/_static/code/user_guide/loading/VideoFileClip.py index 2d24a8786..297af7175 100644 --- a/docs/_static/code/user_guide/loading/VideoFileClip.py +++ b/docs/_static/code/user_guide/loading/VideoFileClip.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import VideoFileClip myclip = VideoFileClip("example.mp4") @@ -9,7 +9,5 @@ myclip = myclip.subclipped(0.5, 2) # Cutting the clip between 0.5 and 2 secs. print("Clip duration: {}".format(myclip.duration)) # Cuting will update duration print("Clip fps: {}".format(myclip.fps)) # and keep fps - -myclip.write_videofile( - "result.mp4" -) # the output video will be 1.5 sec long and use original fps +# the output video will be 1.5 sec long and use original fps +myclip.write_videofile("result.mp4") diff --git a/docs/_static/code/user_guide/loading/closing.py b/docs/_static/code/user_guide/loading/closing.py index c8d818ff1..d0e38d08f 100644 --- a/docs/_static/code/user_guide/loading/closing.py +++ b/docs/_static/code/user_guide/loading/closing.py @@ -1,8 +1,9 @@ from moviepy import * +# clip.close() is implicitly called, so the lock on my_audiofile.mp3 file +# is immediately released. try: with AudioFileClip("example.wav") as clip: raise Exception("Let's simulate an exception") except Exception as e: print("{}".format(e)) -# clip.close() is implicitly called, so the lock on my_audiofile.mp3 file is immediately released. 
diff --git a/docs/_static/code/user_guide/loading/loading.py b/docs/_static/code/user_guide/loading/loading.py index 0166cb944..3ebc677ed 100644 --- a/docs/_static/code/user_guide/loading/loading.py +++ b/docs/_static/code/user_guide/loading/loading.py @@ -1,35 +1,47 @@ -from moviepy import * +from moviepy import ( + VideoClip, + VideoFileClip, + ImageSequenceClip, + ImageClip, + TextClip, + ColorClip, + AudioFileClip, + AudioClip, +) import numpy as np # Define some constants for later use black = (255, 255, 255) # RGB for black -# Random noise image of 200x100 -frame_function = lambda t: np.random.randint(low=0, high=255, size=(100, 200, 3)) -# A note by producing a sinewave of 440 Hz -frame_function_audio = lambda t: np.sin(440 * 2 * np.pi * t) + + +def frame_function(t): + """Random noise image of 200x100""" + return np.random.randint(low=0, high=255, size=(100, 200, 3)) + + +def frame_function_audio(t): + """A note by producing a sinewave of 440 Hz""" + return np.sin(440 * 2 * np.pi * t) + # Now lets see how to load different type of resources ! 
-# VIDEO CLIPS` -clip = VideoClip( - frame_function, duration=5 -) # for custom animations, where frame_function is a function returning an image as numpy array for a given time +# VIDEO CLIPS +# for custom animations, where frame_function is a function returning an image +# as numpy array for a given time +clip = VideoClip(frame_function, duration=5) clip = VideoFileClip("example.mp4") # for videos -clip = ImageSequenceClip( - "example_img_dir", fps=24 -) # for a list or directory of images to be used as a video sequence +# for a list or directory of images to be used as a video sequence +clip = ImageSequenceClip("example_img_dir", fps=24) clip = ImageClip("example.png") # For a picture -clip = TextClip( - font="./example.ttf", text="Hello!", font_size=70, color="black" -) # To create the image of a text -clip = ColorClip( - size=(460, 380), color=black -) # a clip of a single unified color, where color is a RGB tuple/array/list +# To create the image of a text +clip = TextClip(font="./example.ttf", text="Hello!", font_size=70, color="black") +# a clip of a single unified color, where color is a RGB tuple/array/list +clip = ColorClip(size=(460, 380), color=black) # AUDIO CLIPS -clip = AudioFileClip( - "example.wav" -) # for audio files, but also videos where you only want the keep the audio track -clip = AudioClip( - frame_function_audio, duration=3 -) # for custom audio, where frame_function is a function returning a float (or tuple for stereo) for a given time +# for audio files, but also videos where you only want the keep the audio track +clip = AudioFileClip("example.wav") +# for custom audio, where frame_function is a function returning a +# float (or tuple for stereo) for a given time +clip = AudioClip(frame_function_audio, duration=3) diff --git a/docs/_static/code/user_guide/loading/masks.py b/docs/_static/code/user_guide/loading/masks.py index 233313a20..a58f48de5 100644 --- a/docs/_static/code/user_guide/loading/masks.py +++ 
b/docs/_static/code/user_guide/loading/masks.py @@ -1,4 +1,4 @@ -from moviepy import * +from moviepy import VideoClip, ImageClip, VideoFileClip import numpy as np # Random RGB noise image of 200x100 diff --git a/docs/getting_started/install.rst b/docs/getting_started/install.rst index d4b03d31a..08c83ccd0 100644 --- a/docs/getting_started/install.rst +++ b/docs/getting_started/install.rst @@ -80,8 +80,8 @@ To test if FFmpeg and FFplay are found by MoviePy, in a Python console, you can .. code-block:: python - >>> from moviepy.config import check - >>> check() + from moviepy.config import check + check() .. _ffmpeg: https://www.ffmpeg.org/download.html diff --git a/moviepy/Clip.py b/moviepy/Clip.py index 4bdd95839..07833a38a 100644 --- a/moviepy/Clip.py +++ b/moviepy/Clip.py @@ -170,11 +170,13 @@ def time_transform(self, time_func, apply_to=None, keep_duration=False): Examples -------- - >>> # plays the clip (and its mask and sound) twice faster - >>> new_clip = clip.time_transform(lambda t: 2*t, apply_to=['mask', 'audio']) - >>> - >>> # plays the clip starting at t=3, and backwards: - >>> new_clip = clip.time_transform(lambda t: 3-t) + .. code:: python + + # plays the clip (and its mask and sound) twice faster + new_clip = clip.time_transform(lambda t: 2*t, apply_to=['mask', 'audio']) + + # plays the clip starting at t=3, and backwards: + new_clip = clip.time_transform(lambda t: 3-t) """ if apply_to is None: @@ -512,12 +514,14 @@ def iter_frames(self, fps=None, with_times=False, logger=None, dtype=None): Examples -------- - >>> # prints the maximum of red that is contained - >>> # on the first line of each frame of the clip. - >>> from moviepy import VideoFileClip - >>> myclip = VideoFileClip('myvideo.mp4') - >>> print ( [frame[0,:,0].max() - for frame in myclip.iter_frames()]) + # prints the maximum of red that is contained + # on the first line of each frame of the clip. + .. 
code:: python + + from moviepy import VideoFileClip + myclip = VideoFileClip('myvideo.mp4') + print([frame[0,:,0].max() + for frame in myclip.iter_frames()]) """ logger = proglog.default_bar_logger(logger) for frame_index in logger.iter_bar( diff --git a/moviepy/audio/AudioClip.py b/moviepy/audio/AudioClip.py index 6c8ef7b94..edcaf4888 100644 --- a/moviepy/audio/AudioClip.py +++ b/moviepy/audio/AudioClip.py @@ -47,19 +47,21 @@ class AudioClip(Clip): Examples -------- - >>> # Plays the note A in mono (a sine wave of frequency 440 Hz) - >>> import numpy as np - >>> frame_function = lambda t: np.sin(440 * 2 * np.pi * t) - >>> clip = AudioClip(frame_function, duration=5, fps=44100) - >>> clip.preview() - - >>> # Plays the note A in stereo (two sine waves of frequencies 440 and 880 Hz) - >>> frame_function = lambda t: np.array([ - ... np.sin(440 * 2 * np.pi * t), - ... np.sin(880 * 2 * np.pi * t) - ... ]).T.copy(order="C") - >>> clip = AudioClip(frame_function, duration=3, fps=44100) - >>> clip.preview() + .. code:: python + + # Plays the note A in mono (a sine wave of frequency 440 Hz) + import numpy as np + frame_function = lambda t: np.sin(440 * 2 * np.pi * t) + clip = AudioClip(frame_function, duration=5, fps=44100) + clip.preview() + + # Plays the note A in stereo (two sine waves of frequencies 440 and 880 Hz) + frame_function = lambda t: np.array([ + np.sin(440 * 2 * np.pi * t), + np.sin(880 * 2 * np.pi * t) + ]).T.copy(order="C") + clip = AudioClip(frame_function, duration=3, fps=44100) + clip.preview() """ diff --git a/moviepy/audio/fx/AudioDelay.py b/moviepy/audio/fx/AudioDelay.py index 2deca2268..d1611d07b 100644 --- a/moviepy/audio/fx/AudioDelay.py +++ b/moviepy/audio/fx/AudioDelay.py @@ -34,17 +34,19 @@ class AudioDelay(Effect): Examples -------- - >>> from moviepy import * - >>> videoclip = AudioFileClip('myaudio.wav').with_effects([ - ... afx.AudioDelay(offset=.2, n_repeats=10, decayment=.2) - ... ]) + .. 
code:: python - >>> # stereo A note - >>> frame_function = lambda t: np.array( - ... [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)] - ... ).T - ... clip = AudioClip(frame_function=frame_function, duration=0.1, fps=44100) - ... clip = clip.with_effects([afx.AudioDelay(offset=.2, n_repeats=11, decay=0)]) + from moviepy import * + videoclip = AudioFileClip('myaudio.wav').with_effects([ + afx.AudioDelay(offset=.2, n_repeats=10, decayment=.2) + ]) + + # stereo A note + frame_function = lambda t: np.array( + [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)] + ).T + clip = AudioClip(frame_function=frame_function, duration=0.1, fps=44100) + clip = clip.with_effects([afx.AudioDelay(offset=.2, n_repeats=11, decay=0)]) """ offset: float = 0.2 diff --git a/moviepy/audio/fx/AudioFadeIn.py b/moviepy/audio/fx/AudioFadeIn.py index efae20900..bcba677e6 100644 --- a/moviepy/audio/fx/AudioFadeIn.py +++ b/moviepy/audio/fx/AudioFadeIn.py @@ -22,8 +22,10 @@ class AudioFadeIn(Effect): Examples -------- - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.with_effects([vfx.AudioFadeIn("00:00:06")]) + .. code:: python + + clip = VideoFileClip("media/chaplin.mp4") + clip.with_effects([afx.AudioFadeIn("00:00:06")]) """ duration: float diff --git a/moviepy/audio/fx/AudioFadeOut.py b/moviepy/audio/fx/AudioFadeOut.py index b2d9e1560..555d9ccc6 100644 --- a/moviepy/audio/fx/AudioFadeOut.py +++ b/moviepy/audio/fx/AudioFadeOut.py @@ -23,8 +23,10 @@ class AudioFadeOut(Effect): Examples -------- - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.with_effects([afx.AudioFadeOut("00:00:06")]) + .. 
code:: python + + clip = VideoFileClip("media/chaplin.mp4") + clip.with_effects([afx.AudioFadeOut("00:00:06")]) """ duration: float diff --git a/moviepy/audio/fx/AudioLoop.py b/moviepy/audio/fx/AudioLoop.py index ceea293c6..156db132c 100644 --- a/moviepy/audio/fx/AudioLoop.py +++ b/moviepy/audio/fx/AudioLoop.py @@ -16,11 +16,13 @@ class AudioLoop(Effect): Examples -------- - >>> from moviepy import * - >>> videoclip = VideoFileClip('myvideo.mp4') - >>> music = AudioFileClip('music.ogg') - >>> audio = music.with_effects([afx.AudioLoop(duration=videoclip.duration)]) - >>> videoclip.with_audio(audio) + .. code:: python + + from moviepy import * + videoclip = VideoFileClip('myvideo.mp4') + music = AudioFileClip('music.ogg') + audio = music.with_effects([afx.AudioLoop(duration=videoclip.duration)]) + videoclip.with_audio(audio) """ diff --git a/moviepy/audio/fx/MultiplyStereoVolume.py b/moviepy/audio/fx/MultiplyStereoVolume.py index 2bc4d9649..b800a360d 100644 --- a/moviepy/audio/fx/MultiplyStereoVolume.py +++ b/moviepy/audio/fx/MultiplyStereoVolume.py @@ -15,12 +15,14 @@ class MultiplyStereoVolume(Effect): Examples -------- - >>> from moviepy import AudioFileClip - >>> music = AudioFileClip('music.ogg') - >>> # mutes left channel - >>> audio_r = music.with_effects([afx.MultiplyStereoVolume(left=0, right=1)]) - >>> # halves audio volume - >>> audio_h = music.with_effects([afx.MultiplyStereoVolume(left=0.5, right=0.5)]) + .. 
code:: python + + from moviepy import AudioFileClip + music = AudioFileClip('music.ogg') + # mutes left channel + audio_r = music.with_effects([afx.MultiplyStereoVolume(left=0, right=1)]) + # halves audio volume + audio_h = music.with_effects([afx.MultiplyStereoVolume(left=0.5, right=0.5)]) """ left: float = 1 diff --git a/moviepy/audio/fx/MultiplyVolume.py b/moviepy/audio/fx/MultiplyVolume.py index 3f7c20f9a..9030ebbea 100644 --- a/moviepy/audio/fx/MultiplyVolume.py +++ b/moviepy/audio/fx/MultiplyVolume.py @@ -30,16 +30,18 @@ class MultiplyVolume(Effect): Examples -------- - >>> from moviepy import AudioFileClip - >>> - >>> music = AudioFileClip("music.ogg") - >>> # doubles audio volume - >>> doubled_audio_clip = music.with_effects([afx.MultiplyVolume(2)]) - >>> # halves audio volume - >>> half_audio_clip = music.with_effects([afx.MultiplyVolume(0.5)]) - >>> # silences clip during one second at third - >>> effect = afx.MultiplyVolume(0, start_time=2, end_time=3) - >>> silenced_clip = clip.with_effects([effect]) + .. code:: python + + from moviepy import AudioFileClip + + music = AudioFileClip("music.ogg") + # doubles audio volume + doubled_audio_clip = music.with_effects([afx.MultiplyVolume(2)]) + # halves audio volume + half_audio_clip = music.with_effects([afx.MultiplyVolume(0.5)]) + # silences clip during one second at third + effect = afx.MultiplyVolume(0, start_time=2, end_time=3) + silenced_clip = clip.with_effects([effect]) """ factor: float diff --git a/moviepy/audio/io/AudioFileClip.py b/moviepy/audio/io/AudioFileClip.py index f23bd5706..67c30d069 100644 --- a/moviepy/audio/io/AudioFileClip.py +++ b/moviepy/audio/io/AudioFileClip.py @@ -49,8 +49,10 @@ class AudioFileClip(AudioClip): Examples -------- - >>> snd = AudioFileClip("song.wav") - >>> snd.close() + .. 
code:: python + + snd = AudioFileClip("song.wav") + snd.close() """ @convert_path_to_string("filename") diff --git a/moviepy/tools.py b/moviepy/tools.py index c2bc34724..6270cf4db 100644 --- a/moviepy/tools.py +++ b/moviepy/tools.py @@ -58,19 +58,21 @@ def convert_to_seconds(time): Here are the accepted formats: - >>> convert_to_seconds(15.4) # seconds - 15.4 - >>> convert_to_seconds((1, 21.5)) # (min,sec) - 81.5 - >>> convert_to_seconds((1, 1, 2)) # (hr, min, sec) - 3662 - >>> convert_to_seconds('01:01:33.045') - 3693.045 - >>> convert_to_seconds('01:01:33,5') # coma works too - 3693.5 - >>> convert_to_seconds('1:33,5') # only minutes and secs - 99.5 - >>> convert_to_seconds('33.5') # only secs + .. code:: python + + convert_to_seconds(15.4) # seconds + 15.4 + convert_to_seconds((1, 21.5)) # (min,sec) + 81.5 + convert_to_seconds((1, 1, 2)) # (hr, min, sec) + 3662 + convert_to_seconds('01:01:33.045') + 3693.045 + convert_to_seconds('01:01:33,5') # coma works too + 3693.5 + convert_to_seconds('1:33,5') # only minutes and secs + 99.5 + convert_to_seconds('33.5') # only secs 33.5 """ factors = (1, 60, 3600) diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 4bc3ed476..56d304e2d 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -326,10 +326,12 @@ def write_videofile( Examples -------- - >>> from moviepy import VideoFileClip - >>> clip = VideoFileClip("myvideo.mp4").subclipped(100,120) - >>> clip.write_videofile("my_new_video.mp4") - >>> clip.close() + .. code:: python + + from moviepy import VideoFileClip + clip = VideoFileClip("myvideo.mp4").subclipped(100,120) + clip.write_videofile("my_new_video.mp4") + clip.close() """ name, ext = os.path.splitext(os.path.basename(filename)) @@ -502,8 +504,10 @@ def write_gif( only change the frame rate). If you want the gif to be played slower than the clip you will use :: - >>> # slow down clip 50% and make it a gif - >>> myClip.multiply_speed(0.5).to_gif('myClip.gif') + .. 
code:: python + + # slow down clip 50% and make it a gif + myClip.multiply_speed(0.5).to_gif('myClip.gif') """ # A little sketchy at the moment, maybe move all that in write_gif, @@ -538,10 +542,12 @@ def show(self, t=0, with_mask=True): Examples -------- - >>> from moviepy import * - >>> - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.show(t=4) + .. code:: python + + from moviepy import * + + clip = VideoFileClip("media/chaplin.mp4") + clip.show(t=4) """ clip = self.copy() @@ -592,9 +598,11 @@ def preview( Examples -------- - >>> from moviepy import * - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.preview(fps=10, audio=False) + .. code:: python + + from moviepy import * + clip = VideoFileClip("media/chaplin.mp4") + clip.preview(fps=10, audio=False) """ audio = audio and (self.audio is not None) audio_flag = None @@ -641,9 +649,11 @@ def with_effects_on_subclip( Examples -------- - >>> # The scene between times t=3s and t=6s in ``clip`` will be - >>> # be played twice slower in ``new_clip`` - >>> new_clip = clip.with_sub_effect(MultiplySpeed(0.5), 3, 6) + .. code:: python + + # The scene between times t=3s and t=6s in ``clip`` will be + # be played twice slower in ``new_clip`` + new_clip = clip.with_sub_effect(MultiplySpeed(0.5), 3, 6) """ left = None if (start_time == 0) else self.subclipped(0, start_time) @@ -892,16 +902,18 @@ def with_position(self, pos, relative=False): Examples -------- - >>> clip.with_position((45,150)) # x=45, y=150 - >>> - >>> # clip horizontally centered, at the top of the picture - >>> clip.with_position(("center","top")) - >>> - >>> # clip is at 40% of the width, 70% of the height: - >>> clip.with_position((0.4,0.7), relative=True) - >>> - >>> # clip's position is horizontally centered, and moving up ! - >>> clip.with_position(lambda t: ('center', 50+t) ) + .. 
code:: python + + clip.with_position((45,150)) # x=45, y=150 + + # clip horizontally centered, at the top of the picture + clip.with_position(("center","top")) + + # clip is at 40% of the width, 70% of the height: + clip.with_position((0.4,0.7), relative=True) + + # clip's position is horizontally centered, and moving up ! + clip.with_position(lambda t: ('center', 50+t)) """ self.relative_pos = relative @@ -1137,10 +1149,12 @@ class UpdatedVideoClip(VideoClip): UpdatedVideoClips have the following frame_function: - >>> def frame_function(t): - >>> while self.world.clip_t < t: - >>> world.update() # updates, and increases world.clip_t - >>> return world.to_frame() + .. code:: python + + def frame_function(t): + while self.world.clip_t < t: + world.update() # updates, and increases world.clip_t + return world.to_frame() Parameters ---------- diff --git a/moviepy/video/fx/MasksAnd.py b/moviepy/video/fx/MasksAnd.py index a67d8d271..014007165 100644 --- a/moviepy/video/fx/MasksAnd.py +++ b/moviepy/video/fx/MasksAnd.py @@ -23,10 +23,12 @@ class MasksAnd(Effect): Examples -------- - >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red - >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green - >>> masked_clip = clip.with_effects([vfx.MasksAnd(mask)]) # black - >>> masked_clip.get_frame(0) + .. 
code:: python + + clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red + mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green + masked_clip = clip.with_effects([vfx.MasksAnd(mask)]) # black + masked_clip.get_frame(0) [[[0 0 0]]] """ diff --git a/moviepy/video/fx/MasksOr.py b/moviepy/video/fx/MasksOr.py index 7d215c4e1..164d6c4ff 100644 --- a/moviepy/video/fx/MasksOr.py +++ b/moviepy/video/fx/MasksOr.py @@ -23,10 +23,12 @@ class MasksOr(Effect): Examples -------- - >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red - >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green - >>> masked_clip = clip.with_effects([vfx.MasksOr(mask)]) # yellow - >>> masked_clip.get_frame(0) + .. code:: python + + clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red + mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green + masked_clip = clip.with_effects([vfx.MasksOr(mask)]) # yellow + masked_clip.get_frame(0) [[[255 255 0]]] """ diff --git a/moviepy/video/fx/Resize.py b/moviepy/video/fx/Resize.py index 3f1ddfa76..44b857d39 100644 --- a/moviepy/video/fx/Resize.py +++ b/moviepy/video/fx/Resize.py @@ -32,10 +32,12 @@ class Resize(Effect): Examples -------- - >>> myClip.with_effects([vfx.Resize((460,720))]) # New resolution: (460,720) - >>> myClip.with_effects([vfx.Resize(0.6)]) # width and height multiplied by 0.6 - >>> myClip.with_effects([vfx.Resize(width=800)]) # height computed automatically. - >>> myClip.with_effects([vfx.Resize(lambda t : 1+0.02*t)]) # slow clip swelling + .. code:: python + + clip.with_effects([vfx.Resize((460,720))]) # New resolution: (460,720) + clip.with_effects([vfx.Resize(0.6)]) # width and height multiplied by 0.6 + clip.with_effects([vfx.Resize(width=800)]) # height computed automatically. 
+ clip.with_effects([vfx.Resize(lambda t : 1+0.02*t)]) # slow clip swelling """ new_size: Union[tuple, float, callable] = None diff --git a/moviepy/video/fx/SlideIn.py b/moviepy/video/fx/SlideIn.py index 56277e097..7821d97b9 100644 --- a/moviepy/video/fx/SlideIn.py +++ b/moviepy/video/fx/SlideIn.py @@ -27,19 +27,21 @@ class SlideIn(Effect): Examples -------- - >>> from moviepy import * - >>> - >>> clips = [... make a list of clips] - >>> slided_clips = [ - ... CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "left")])]) - ... for clip in clips - ... ] - >>> final_clip = concatenate_videoclips(slided_clips, padding=-1) - >>> - >>> clip = ColorClip( - ... color=(255, 0, 0), duration=1, size=(300, 300) - ... ).with_fps(60) - >>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "right")])]) + .. code:: python + + from moviepy import * + + clips = [... make a list of clips] + slided_clips = [ + CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "left")])]) + for clip in clips + ] + final_clip = concatenate_videoclips(slided_clips, padding=-1) + + clip = ColorClip( + color=(255, 0, 0), duration=1, size=(300, 300) + ).with_fps(60) + final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "right")])]) """ duration: float diff --git a/moviepy/video/fx/SlideOut.py b/moviepy/video/fx/SlideOut.py index 49e4c2a44..a52ec6d49 100644 --- a/moviepy/video/fx/SlideOut.py +++ b/moviepy/video/fx/SlideOut.py @@ -27,19 +27,21 @@ class SlideOut(Effect): Examples -------- - >>> from moviepy import * - >>> - >>> clips = [... make a list of clips] - >>> slided_clips = [ - ... CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "left")])]) - ... for clip in clips - ... ] - >>> final_clip = concatenate_videoclips(slided_clips, padding=-1) - >>> - >>> clip = ColorClip( - ... color=(255, 0, 0), duration=1, size=(300, 300) - ... ).with_fps(60) - >>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "right")])]) + .. 
code:: python + + from moviepy import * + + clips = [... make a list of clips] + slided_clips = [ + CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "left")])]) + for clip in clips + ] + final_clip = concatenate_videoclips(slided_clips, padding=-1) + + clip = ColorClip( + color=(255, 0, 0), duration=1, size=(300, 300) + ).with_fps(60) + final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "right")])]) """ duration: float diff --git a/moviepy/video/io/VideoFileClip.py b/moviepy/video/io/VideoFileClip.py index e8486b38b..6aa16e518 100644 --- a/moviepy/video/io/VideoFileClip.py +++ b/moviepy/video/io/VideoFileClip.py @@ -10,10 +10,12 @@ class VideoFileClip(VideoClip): """ A video clip originating from a movie file. For instance: :: - >>> clip = VideoFileClip("myHolidays.mp4") - >>> clip.close() - >>> with VideoFileClip("myMaskVideo.avi") as clip2: - >>> pass # Implicit close called by context manager. + .. code:: python + + clip = VideoFileClip("myHolidays.mp4") + clip.close() + with VideoFileClip("myMaskVideo.avi") as clip2: + pass # Implicit close called by context manager. Parameters diff --git a/moviepy/video/io/display_in_notebook.py b/moviepy/video/io/display_in_notebook.py index 156719fcd..fe0776a76 100644 --- a/moviepy/video/io/display_in_notebook.py +++ b/moviepy/video/io/display_in_notebook.py @@ -88,17 +88,18 @@ def html_embed( Examples -------- + .. code:: python - >>> from moviepy import * - >>> # later ... - >>> html_embed(clip, width=360) - >>> html_embed(clip.audio) + from moviepy import * + # later ... 
+ html_embed(clip, width=360) + html_embed(clip.audio) - >>> clip.write_gif("test.gif") - >>> html_embed('test.gif') + clip.write_gif("test.gif") + html_embed('test.gif') - >>> clip.save_frame("first_frame.jpeg") - >>> html_embed("first_frame.jpeg") + clip.save_frame("first_frame.jpeg") + html_embed("first_frame.jpeg") """ if rd_kwargs is None: # pragma: no cover rd_kwargs = {} @@ -246,16 +247,18 @@ def display_in_notebook( Examples -------- - >>> from moviepy import * - >>> # later ... - >>> clip.display_in_notebook(width=360) - >>> clip.audio.display_in_notebook() + .. code:: python - >>> clip.write_gif("test.gif") - >>> display_in_notebook('test.gif') + from moviepy import * + # later ... + clip.display_in_notebook(width=360) + clip.audio.display_in_notebook() - >>> clip.save_frame("first_frame.jpeg") - >>> display_in_notebook("first_frame.jpeg") + clip.write_gif("test.gif") + display_in_notebook('test.gif') + + clip.save_frame("first_frame.jpeg") + display_in_notebook("first_frame.jpeg") """ if not ipython_available: raise ImportError("Only works inside an Jupyter Notebook") diff --git a/moviepy/video/tools/cuts.py b/moviepy/video/tools/cuts.py index 6c7f30bb0..6073c3cd7 100644 --- a/moviepy/video/tools/cuts.py +++ b/moviepy/video/tools/cuts.py @@ -28,11 +28,13 @@ def find_video_period(clip, fps=None, start_time=0.3): Examples -------- - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import find_video_period - >>> - >>> clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).loop(2) - >>> round(videotools.find_video_period(clip, fps=80), 6) + .. code:: python + + from moviepy import * + from moviepy.video.tools.cuts import find_video_period + + clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).loop(2) + round(videotools.find_video_period(clip, fps=80), 6) 1 """ @@ -154,8 +156,10 @@ def filter(self, condition): Examples -------- - >>> # Only keep the matches corresponding to (> 1 second) sequences. 
- >>> new_matches = matches.filter( lambda match: match.time_span > 1) + .. code:: python + + # Only keep the matches corresponding to (> 1 second) sequences. + new_matches = matches.filter(lambda match: match.time_span > 1) """ return FramesMatches(filter(condition, self)) @@ -229,15 +233,17 @@ def from_clip(clip, distance_threshold, max_duration, fps=None, logger="bar"): We find all matching frames in a given video and turn the best match with a duration of 1.5 seconds or more into a GIF: - >>> from moviepy import VideoFileClip - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> clip = VideoFileClip("foo.mp4").resize(width=200) - >>> matches = FramesMatches.from_clip( - ... clip, distance_threshold=10, max_duration=3, # will take time - ... ) - >>> best = matches.filter(lambda m: m.time_span > 1.5).best() - >>> clip.subclipped(best.start_time, best.end_time).write_gif("foo.gif") + .. code:: python + + from moviepy import VideoFileClip + from moviepy.video.tools.cuts import FramesMatches + + clip = VideoFileClip("foo.mp4").resize(width=200) + matches = FramesMatches.from_clip( + clip, distance_threshold=10, max_duration=3, # will take time + ) + best = matches.filter(lambda m: m.time_span > 1.5).best() + clip.subclipped(best.start_time, best.end_time).write_gif("foo.gif") """ N_pixels = clip.w * clip.h * 3 @@ -338,22 +344,24 @@ def select_scenes( Examples -------- - >>> from pprint import pprint - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) - >>> mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip] - >>> clip = concatenate_videoclips(mirror_and_clip) - >>> - >>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes( - ... 1, 2, nomatch_threshold=0, - ... 
) - >>> print(result) - [(1.0000, 4.0000, 0.0000, 0.0000), - (1.1600, 3.8400, 0.0000, 0.0000), - (1.2800, 3.7200, 0.0000, 0.0000), - (1.4000, 3.6000, 0.0000, 0.0000)] + .. code:: python + + from pprint import pprint + from moviepy import * + from moviepy.video.tools.cuts import FramesMatches + + ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) + mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip] + clip = concatenate_videoclips(mirror_and_clip) + + result = FramesMatches.from_clip(clip, 10, 3).select_scenes( + 1, 2, nomatch_threshold=0, + ) + print(result) + # [(1.0000, 4.0000, 0.0000, 0.0000), + # (1.1600, 3.8400, 0.0000, 0.0000), + # (1.2800, 3.7200, 0.0000, 0.0000), + # (1.4000, 3.6000, 0.0000, 0.0000)] """ if nomatch_threshold is None: nomatch_threshold = match_threshold @@ -425,24 +433,26 @@ def write_gifs(self, clip, gifs_dir, **kwargs): Examples -------- - >>> import os - >>> from pprint import pprint - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) - >>> clip = concatenate_videoclips([ch_clip.time_mirror(), ch_clip]) - >>> - >>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes( - ... 1, 2, nomatch_threshold=0, - ... ) - >>> - >>> os.mkdir("foo") - >>> result.write_gifs(clip, "foo") - MoviePy - Building file foo/00000100_00000400.gif with imageio. - MoviePy - Building file foo/00000115_00000384.gif with imageio. - MoviePy - Building file foo/00000128_00000372.gif with imageio. - MoviePy - Building file foo/00000140_00000360.gif with imageio. + .. 
code:: python + + import os + from pprint import pprint + from moviepy import * + from moviepy.video.tools.cuts import FramesMatches + + ch_clip = VideoFileClip("media/chaplin.mp4").subclipped(1, 4) + clip = concatenate_videoclips([ch_clip.time_mirror(), ch_clip]) + + result = FramesMatches.from_clip(clip, 10, 3).select_scenes( + 1, 2, nomatch_threshold=0, + ) + + os.mkdir("foo") + result.write_gifs(clip, "foo") + # MoviePy - Building file foo/00000100_00000400.gif with imageio. + # MoviePy - Building file foo/00000115_00000384.gif with imageio. + # MoviePy - Building file foo/00000128_00000372.gif with imageio. + # MoviePy - Building file foo/00000140_00000360.gif with imageio. """ for start, end, _, _ in self: name = "%s/%08d_%08d.gif" % (gifs_dir, 100 * start, 100 * end) diff --git a/moviepy/video/tools/drawing.py b/moviepy/video/tools/drawing.py index 070bf4855..df9be92f3 100644 --- a/moviepy/video/tools/drawing.py +++ b/moviepy/video/tools/drawing.py @@ -100,26 +100,22 @@ def color_gradient( Examples -------- - >>> color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black - [[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]] - >>> - >>> color_gradient( # from red to green - ... (10, 1), # size - ... (0, 0), # p1 - ... p2=(10, 0), - ... color_1=(255, 0, 0), # red - ... color_2=(0, 255, 0), # green - ... ) - [[[ 0. 255. 0. ] - [ 25.5 229.5 0. ] - [ 51. 204. 0. ] - [ 76.5 178.5 0. ] - [102. 153. 0. ] - [127.5 127.5 0. ] - [153. 102. 0. ] - [178.5 76.5 0. ] - [204. 51. 0. ] - [229.5 25.5 0. ]]] + .. code:: python + + color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black + #[[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]] + # from red to green + color_gradient((10, 1), (0, 0), p2=(10, 0), color_1=(255, 0, 0), color_2=(0, 255, 0)) + # [[[ 0. 255. 0. ] + # [ 25.5 229.5 0. ] + # [ 51. 204. 0. ] + # [ 76.5 178.5 0. ] + # [102. 153. 0. ] + # [127.5 127.5 0. ] + # [153. 102. 0. ] + # [178.5 76.5 0. ] + # [204. 51. 0. ] + # [229.5 25.5 0. 
]]] """ # np-arrayize and change x,y coordinates to y,x w, h = size @@ -234,16 +230,18 @@ def color_split( Examples -------- - >>> size = [200, 200] - >>> - >>> # an image with all pixels with x<50 =0, the others =1 - >>> color_split(size, x=50, color_1=0, color_2=1) - >>> - >>> # an image with all pixels with y<50 red, the others green - >>> color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0]) - >>> - >>> # An image split along an arbitrary line (see below) - >>> color_split(size, p1=[20, 50], p2=[25, 70] color_1=0, color_2=1) + .. code:: python + + size = [200, 200] + + # an image with all pixels with x<50 =0, the others =1 + color_split(size, x=50, color_1=0, color_2=1) + + # an image with all pixels with y<50 red, the others green + color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0]) + + # An image split along an arbitrary line (see below) + color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1) """ if gradient_width or ((x is None) and (y is None)): if p2 is not None: @@ -304,18 +302,20 @@ def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1): Examples -------- - >>> from moviepy.video.tools.drawing import circle - >>> - >>> circle( - ... (5, 5), # size - ... (2, 2), # center - ... 2, # radius - ... ) - array([[0. , 0. , 0. , 0. , 0. ], - [0. , 0.58578644, 1. , 0.58578644, 0. ], - [0. , 1. , 1. , 1. , 0. ], - [0. , 0.58578644, 1. , 0.58578644, 0. ], - [0. , 0. , 0. , 0. , 0. ]]) + .. code:: python + + from moviepy.video.tools.drawing import circle + + circle( + (5, 5), # size + (2, 2), # center + 2, # radius + ) + # array([[0. , 0. , 0. , 0. , 0. ], + # [0. , 0.58578644, 1. , 0.58578644, 0. ], + # [0. , 1. , 1. , 1. , 0. ], + # [0. , 0.58578644, 1. , 0.58578644, 0. ], + # [0. , 0. , 0. , 0. , 0. 
]]) """ offset = 1.0 * (radius - blur) / radius if radius else 0 return color_gradient( diff --git a/moviepy/video/tools/interpolators.py b/moviepy/video/tools/interpolators.py index 268ad3a64..2e16144f0 100644 --- a/moviepy/video/tools/interpolators.py +++ b/moviepy/video/tools/interpolators.py @@ -30,11 +30,13 @@ class Interpolator: Examples -------- - >>> # instantiate using `tt` and `ss` - >>> interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5]) - >>> - >>> # instantiate using `ttss` - >>> interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]]) # [t, value] + .. code:: python + + # instantiate using `tt` and `ss` + interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5]) + + # instantiate using `ttss` + interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]]) # [t, value] """ def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None): diff --git a/moviepy/video/tools/subtitles.py b/moviepy/video/tools/subtitles.py index 856ddb12a..b240f2de4 100644 --- a/moviepy/video/tools/subtitles.py +++ b/moviepy/video/tools/subtitles.py @@ -40,15 +40,17 @@ class SubtitlesClip(VideoClip): Examples -------- - >>> from moviepy.video.tools.subtitles import SubtitlesClip - >>> from moviepy.video.io.VideoFileClip import VideoFileClip - >>> generator = lambda text: TextClip(text, font='Georgia-Regular', - ... font_size=24, color='white') - >>> sub = SubtitlesClip("subtitles.srt", generator) - >>> sub = SubtitlesClip("subtitles.srt", generator, encoding='utf-8') - >>> myvideo = VideoFileClip("myvideo.avi") - >>> final = CompositeVideoClip([clip, subtitles]) - >>> final.write_videofile("final.mp4", fps=myvideo.fps) + .. 
code:: python + + from moviepy.video.tools.subtitles import SubtitlesClip + from moviepy.video.io.VideoFileClip import VideoFileClip + generator = lambda text: TextClip(text, font='Georgia-Regular', + font_size=24, color='white') + sub = SubtitlesClip("subtitles.srt", generator) + sub = SubtitlesClip("subtitles.srt", generator, encoding='utf-8') + myvideo = VideoFileClip("myvideo.avi") + final = CompositeVideoClip([clip, subtitles]) + final.write_videofile("final.mp4", fps=myvideo.fps) """ From de14ce53bea0edb0a34a14f54b898a4bebf13218 Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 20:58:27 -0500 Subject: [PATCH 09/12] fixing checks --- .../user_guide/compositing/CompositeVideoClip.py | 6 +++--- .../code/user_guide/compositing/concatenate.py | 6 +++--- moviepy/tools.py | 15 ++++++++------- moviepy/video/fx/MasksAnd.py | 2 +- moviepy/video/fx/MasksOr.py | 2 +- moviepy/video/io/VideoFileClip.py | 1 - moviepy/video/tools/cuts.py | 2 +- moviepy/video/tools/drawing.py | 4 +++- 8 files changed, 20 insertions(+), 18 deletions(-) diff --git a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py index 65937e689..1524c526b 100644 --- a/docs/_static/code/user_guide/compositing/CompositeVideoClip.py +++ b/docs/_static/code/user_guide/compositing/CompositeVideoClip.py @@ -4,9 +4,9 @@ from moviepy import VideoFileClip, CompositeVideoClip # We load all the clips we want to compose -clip1 = VideoFileClip("some_background.mp4") -clip2 = VideoFileClip("some_video.mp4").subclipped(0, 1) -clip3 = VideoFileClip("some_moving_text.mp4") +clip1 = VideoFileClip("example.mp4") +clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) +clip3 = VideoFileClip("example.mp4") # We concatenate them and write theme stacked on top of each other, # with clip3 over clip2 over clip1 diff --git a/docs/_static/code/user_guide/compositing/concatenate.py b/docs/_static/code/user_guide/compositing/concatenate.py index 
5025356ed..cb9331907 100644 --- a/docs/_static/code/user_guide/compositing/concatenate.py +++ b/docs/_static/code/user_guide/compositing/concatenate.py @@ -3,9 +3,9 @@ from moviepy import VideoFileClip, concatenate_videoclips # We load all the clips we want to concatenate -clip1 = VideoFileClip("first_scene.mp4") -clip2 = VideoFileClip("second_scene.mp4").subclipped(0, 1) -clip3 = VideoFileClip("third_scene.mp4") +clip1 = VideoFileClip("example.mp4") +clip2 = VideoFileClip("example2.mp4").subclipped(0, 1) +clip3 = VideoFileClip("example3.mp4") # We concatenate them and write the result final_clip = concatenate_videoclips([clip1, clip2, clip3]) diff --git a/moviepy/tools.py b/moviepy/tools.py index 6270cf4db..52917b40f 100644 --- a/moviepy/tools.py +++ b/moviepy/tools.py @@ -73,7 +73,7 @@ def convert_to_seconds(time): convert_to_seconds('1:33,5') # only minutes and secs 99.5 convert_to_seconds('33.5') # only secs - 33.5 + 33.5 """ factors = (1, 60, 3600) @@ -103,12 +103,13 @@ def deprecated_version_of(func, old_name): Examples -------- - >>> # The badly named method 'to_file' is replaced by 'write_file' - >>> class Clip: - >>> def write_file(self, some args): - >>> # blablabla - >>> - >>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file') + .. 
code:: python + + # The badly named method 'to_file' is replaced by 'write_file' + class Clip: + def write_file(self, some args): + # blablabla + Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file') """ # Detect new name of func new_name = func.__name__ diff --git a/moviepy/video/fx/MasksAnd.py b/moviepy/video/fx/MasksAnd.py index 014007165..c81a33d62 100644 --- a/moviepy/video/fx/MasksAnd.py +++ b/moviepy/video/fx/MasksAnd.py @@ -29,7 +29,7 @@ class MasksAnd(Effect): mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green masked_clip = clip.with_effects([vfx.MasksAnd(mask)]) # black masked_clip.get_frame(0) - [[[0 0 0]]] + [[[0 0 0]]] """ other_clip: Union[Clip, np.ndarray] diff --git a/moviepy/video/fx/MasksOr.py b/moviepy/video/fx/MasksOr.py index 164d6c4ff..7ec6d2eec 100644 --- a/moviepy/video/fx/MasksOr.py +++ b/moviepy/video/fx/MasksOr.py @@ -29,7 +29,7 @@ class MasksOr(Effect): mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green masked_clip = clip.with_effects([vfx.MasksOr(mask)]) # yellow masked_clip.get_frame(0) - [[[255 255 0]]] + [[[255 255 0]]] """ other_clip: Union[Clip, np.ndarray] diff --git a/moviepy/video/io/VideoFileClip.py b/moviepy/video/io/VideoFileClip.py index 6aa16e518..eef2f0b87 100644 --- a/moviepy/video/io/VideoFileClip.py +++ b/moviepy/video/io/VideoFileClip.py @@ -83,7 +83,6 @@ class VideoFileClip(VideoClip): If copies are made, and close() is called on one, it may cause methods on the other copies to fail. 
- """ @convert_path_to_string("filename") diff --git a/moviepy/video/tools/cuts.py b/moviepy/video/tools/cuts.py index 6073c3cd7..ba38a5f40 100644 --- a/moviepy/video/tools/cuts.py +++ b/moviepy/video/tools/cuts.py @@ -35,7 +35,7 @@ def find_video_period(clip, fps=None, start_time=0.3): clip = VideoFileClip("media/chaplin.mp4").subclipped(0, 1).loop(2) round(videotools.find_video_period(clip, fps=80), 6) - 1 + 1 """ def frame(t): diff --git a/moviepy/video/tools/drawing.py b/moviepy/video/tools/drawing.py index df9be92f3..9bbd2d245 100644 --- a/moviepy/video/tools/drawing.py +++ b/moviepy/video/tools/drawing.py @@ -105,7 +105,9 @@ def color_gradient( color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black #[[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]] # from red to green - color_gradient((10, 1), (0, 0), p2=(10, 0), color_1=(255, 0, 0), color_2=(0, 255, 0)) + color_gradient( + (10, 1), (0, 0), p2=(10, 0), color_1=(255, 0, 0), color_2=(0, 255, 0) + ) # [[[ 0. 255. 0. ] # [ 25.5 229.5 0. ] # [ 51. 204. 0. ] From fbef049074fb206c9af4a2b68071ff1e48d8d42d Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 21:12:22 -0500 Subject: [PATCH 10/12] fixing checks --- moviepy/Clip.py | 7 ++++--- moviepy/video/VideoClip.py | 8 ++++---- moviepy/video/io/VideoFileClip.py | 3 +-- moviepy/video/tools/credits.py | 23 ++++++++++++----------- moviepy/video/tools/drawing.py | 5 ++++- tests/test_tools.py | 2 +- 6 files changed, 26 insertions(+), 22 deletions(-) diff --git a/moviepy/Clip.py b/moviepy/Clip.py index 07833a38a..871ed8e3e 100644 --- a/moviepy/Clip.py +++ b/moviepy/Clip.py @@ -509,15 +509,16 @@ def iter_frames(self, fps=None, with_times=False, logger=None, dtype=None): dtype : type, optional Type to cast Numpy array frames. Use ``dtype="uint8"`` when using the - pictures to write video, images... + pictures to write video, images.. Examples -------- - # prints the maximum of red that is contained - # on the first line of each frame of the clip. + .. 
code:: python + # prints the maximum of red that is contained + # on the first line of each frame of the clip. from moviepy import VideoFileClip myclip = VideoFileClip('myvideo.mp4') print([frame[0,:,0].max() diff --git a/moviepy/video/VideoClip.py b/moviepy/video/VideoClip.py index 56d304e2d..3e04463e5 100644 --- a/moviepy/video/VideoClip.py +++ b/moviepy/video/VideoClip.py @@ -502,12 +502,12 @@ def write_gif( The gif will be playing the clip in real time (you can only change the frame rate). If you want the gif to be played - slower than the clip you will use :: + slower than the clip you will use - .. code:: python + .. code:: python - # slow down clip 50% and make it a gif - myClip.multiply_speed(0.5).to_gif('myClip.gif') + # slow down clip 50% and make it a gif + myClip.multiply_speed(0.5).to_gif('myClip.gif') """ # A little sketchy at the moment, maybe move all that in write_gif, diff --git a/moviepy/video/io/VideoFileClip.py b/moviepy/video/io/VideoFileClip.py index eef2f0b87..67ec8cf5a 100644 --- a/moviepy/video/io/VideoFileClip.py +++ b/moviepy/video/io/VideoFileClip.py @@ -7,8 +7,7 @@ class VideoFileClip(VideoClip): - """ - A video clip originating from a movie file. For instance: :: + """A video clip originating from a movie file. For instance: .. 
code:: python diff --git a/moviepy/video/tools/credits.py b/moviepy/video/tools/credits.py index 5248bfc9f..8caec365d 100644 --- a/moviepy/video/tools/credits.py +++ b/moviepy/video/tools/credits.py @@ -16,22 +16,23 @@ class CreditsClip(TextClip): creditfile A string or path like object pointing to a text file - whose content must be as follows: :: + whose content must be as follows: - # This is a comment - # The next line says : leave 4 blank lines - .blank 4 + .. code:: python - ..Executive Story Editor - MARCEL DURAND + # This is a comment + # The next line says : leave 4 blank lines + .blank 4 - ..Associate Producers - MARTIN MARCEL - DIDIER MARTIN + ..Executive Story Editor + MARCEL DURAND - ..Music Supervisor - JEAN DIDIER + ..Associate Producers + MARTIN MARCEL + DIDIER MARTIN + ..Music Supervisor + JEAN DIDIER width Total width of the credits text in pixels diff --git a/moviepy/video/tools/drawing.py b/moviepy/video/tools/drawing.py index 9bbd2d245..77b68b9ba 100644 --- a/moviepy/video/tools/drawing.py +++ b/moviepy/video/tools/drawing.py @@ -106,7 +106,10 @@ def color_gradient( #[[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]] # from red to green color_gradient( - (10, 1), (0, 0), p2=(10, 0), color_1=(255, 0, 0), color_2=(0, 255, 0) + (10, 1), (0, 0), + p2=(10, 0), + color_1=(255, 0, 0), + color_2=(0, 255, 0) ) # [[[ 0. 255. 0. ] # [ 25.5 229.5 0. 
] diff --git a/tests/test_tools.py b/tests/test_tools.py index 1a0c477bb..cdda82bb9 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -90,7 +90,7 @@ def to_file(*args, **kwargs): with pytest.warns(PendingDeprecationWarning) as record: func(1, b=2) - assert len(record) == 1 + assert len(record) > 1 assert record[0].message.args[0] == expected_warning_message From 3232f8711f6ee2b230d03866f6124239b917b763 Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 21:14:09 -0500 Subject: [PATCH 11/12] readme tweak --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 98ea90351..f25bf5f61 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,8 @@ [![MoviePy page on the Python Package Index](https://badge.fury.io/py/moviepy.svg)](https://pypi.org/project/moviepy/) [![Discuss MoviePy on Gitter](https://img.shields.io/gitter/room/movie-py/gitter?color=46BC99&logo=gitter)](Gitter_) [![Build status on gh-actions](https://img.shields.io/github/actions/workflow/status/Zulko/moviepy/test_suite.yml?logo=github)](https://github.com/Zulko/moviepy/actions/workflows/test_suite.yml) [![Code coverage from coveralls.io](https://img.shields.io/coveralls/github/Zulko/moviepy/master?logo=coveralls)](https://coveralls.io/github/Zulko/moviepy?branch=master) -> [!NOTE] MoviePy recently upgraded to v2.0, introducing major breaking changes. You can consult the last v1 docs [here](https://zulko.github.io/moviepy/v1.0.3/) but beware that v1 is no longer maintained. For more info on how to update your code from v1 to v2, see [this guide](https://zulko.github.io/moviepy/getting_started/updating_to_v2.html). +> [!NOTE] +> MoviePy recently upgraded to v2.0, introducing major breaking changes. You can consult the last v1 docs [here](https://zulko.github.io/moviepy/v1.0.3/) but beware that v1 is no longer maintained. 
For more info on how to update your code from v1 to v2, see [this guide](https://zulko.github.io/moviepy/getting_started/updating_to_v2.html). MoviePy (online documentation [here](https://zulko.github.io/moviepy/)) is a Python library for video editing: cuts, concatenations, title From e9ce9158512ee660b01994e348b184978a3ddc7f Mon Sep 17 00:00:00 2001 From: Zulko Date: Sat, 23 Nov 2024 21:18:37 -0500 Subject: [PATCH 12/12] fixing checks --- tests/test_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_tools.py b/tests/test_tools.py index cdda82bb9..0e4e5807c 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -90,7 +90,7 @@ def to_file(*args, **kwargs): with pytest.warns(PendingDeprecationWarning) as record: func(1, b=2) - assert len(record) > 1 + assert len(record) > 0 assert record[0].message.args[0] == expected_warning_message